From 218372bf03eb729357828b49f83e208e9dbb3961 Mon Sep 17 00:00:00 2001 From: RajivTS Date: Sat, 8 Jan 2022 12:54:17 +0530 Subject: [PATCH] Combined changes for supporting NotExpr evaluation. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updated planProjectionOperators and planSelectionOperators to include case for tree.NotExpr Replaced RunTests with RunTestsWithoutAllNullsInjection Fixes for build & lint issues Removed unused field Addressing PR review comments backupccl: use SpanGroup.Sub instead of CoveringMerge Release note: none. rowenc: reduce reliance on the entire TableDescriptor This change reduces uses of the entire TableDescriptor. Release note: None descpb: add and use NoColumnID constant This change adds a `descpb.NoColumnID` constant so we don't have to use `descpb.ColumnID(encoding.NoColumnID)` everywhere. Release note: None rowenc: remove DatumAlloc.scratch This commit removes the `DatumAlloc.scratch` field which is always nil. Release note: None sql: move `DatumAlloc` to sem/tree This commit moves `DatumAlloc` from `sql/rowenc` to `sql/sem/tree`. This is a fairly low-level construct and is not used exclusively for row encoding/decoding. Release note: None rowenc: move low-level key encoding functions to subpackage The `rowenc` package contains a hefty mix of functions operating at different conceptual levels - some use higher level constructs like table and index descriptors, others are lower level utilities. The naming of these functions doesn't help; for example, `EncodeTableKey` sounds like a top-level function but is actually a low-level utility that appends a single value to a key. This commit moves the lower level utilities for encoding datum values into keys to the `rowenc/keyside` package. Release note: None rowenc: minor cleanup of array decoding code The code around array decoding was confusing; we now have a single `decodeArray` variant which is the inverse of `encodeArray`. Release note: None rowenc: move low-level value encoding functions to subpackage This commit moves the lower level utilities for encoding datum values into values to the `rowenc/valueside` package. Release note: None valueside: introduce ColumnIDDelta type When encoding multiple values, we encode the differences between the ColumnIDs. This difference is passed to `valueside.Encode`, but it is passed as a `descpb.ColumnID`. This commit introduces a new type to make this distinction more evident. Release note: None valueside: add Decoder helper We have a few copies of the same logic of decoding multiple SQL values from a Value, in particular in range feed handling code. This commit adds a helper type that centralizes this logic, reducing duplication. Note that the query execution engines retain their own specialized implementations. Release note: None sql: release BatchFlowCoordinator objects to pool This commit adds `BatchFlowCoordinator` objects to their `vectorizedFlowCreator`'s `releasables` slice, which ensures that their `Release` method is called and that they are properly recycled. Before this change, heap profiles and instrumentation both indicated that these objects were not being recycled as intended.
For example, heap profiles looked like: ``` Type: alloc_objects Time: Dec 30, 2021 at 2:10pm (EST) Active filters: focus=Pool\).Get Showing nodes accounting for 92189, 0.57% of 16223048 total ----------------------------------------------------------+------------- flat flat% sum% cum cum% calls calls% + context ----------------------------------------------------------+------------- 71505 79.69% | github.com/cockroachdb/cockroach/pkg/sql/colflow.NewBatchFlowCoordinator /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/colflow/flow_coordinator.go:218 10923 12.17% | github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency.newLockTableGuardImpl /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock_table.go:452 2048 2.28% | github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree.(*Map).maybeInitialize /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree/map.go:112 1170 1.30% | github.com/cockroachdb/cockroach/pkg/sql/colexec.newMaterializerInternal /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/colexec/materializer.go:179 1030 1.15% | github.com/cockroachdb/pebble.(*Iterator).Clone /Users/nathan/Go/src/github.com/cockroachdb/cockroach/vendor/github.com/cockroachdb/pebble/iterator.go:1228 585 0.65% | github.com/cockroachdb/cockroach/pkg/sql.MakeDistSQLReceiver /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/distsql_running.go:841 585 0.65% | github.com/cockroachdb/cockroach/pkg/sql/colfetcher.NewColBatchScan /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/colfetcher/colbatch_scan.go:223 585 0.65% | github.com/cockroachdb/cockroach/pkg/sql/span.MakeBuilder /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/sql/span/span_builder.go:61 585 0.65% | github.com/cockroachdb/cockroach/pkg/storage.mvccGet /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/storage/mvcc.go:859 585 0.65% | github.com/cockroachdb/cockroach/pkg/storage.mvccScanToBytes /Users/nathan/Go/src/github.com/cockroachdb/cockroach/pkg/storage/mvcc.go:2357 128 0.14% | github.com/cockroachdb/pebble.(*DB).newIterInternal /Users/nathan/Go/src/github.com/cockroachdb/cockroach/vendor/github.com/cockroachdb/pebble/db.go:765 0 0% 0% 89729 0.55% | sync.(*Pool).Get /Users/nathan/Go/go/src/sync/pool.go:148 ``` Notice that `sync.(*Pool).Get` is responsible for *0.55%* of heap allocations and **79.69%** of these are from `colflow.NewBatchFlowCoordinator`. After this change, that source of allocations goes away and we see the following impact on micro-benchmarks: ``` name old time/op new time/op delta KV/Scan/SQL/rows=1-10 95.1µs ± 7% 95.9µs ± 5% ~ (p=0.579 n=10+10) KV/Scan/SQL/rows=10-10 100µs ± 3% 103µs ±12% ~ (p=0.829 n=8+10) name old alloc/op new alloc/op delta KV/Scan/SQL/rows=10-10 21.7kB ± 0% 21.5kB ± 0% -0.76% (p=0.000 n=10+10) KV/Scan/SQL/rows=1-10 20.1kB ± 0% 19.9kB ± 0% -0.70% (p=0.000 n=10+9) name old allocs/op new allocs/op delta KV/Scan/SQL/rows=1-10 245 ± 0% 244 ± 0% -0.41% (p=0.000 n=10+10) KV/Scan/SQL/rows=10-10 280 ± 0% 279 ± 0% -0.36% (p=0.001 n=8+9) ``` sql/catalog: restore fast-path in FullIndexColumnIDs This commit restores a [fast-path](https://github.com/cockroachdb/cockroach/commit/c9e116e586f24c5f3a831ac653f14fd03f588b93#diff-19625608f4a6e23e6fe0818f3a621e716615765cb338d18fe34b43f0a535f06dL140) in `FullIndexColumnIDs` which was lost in c9e116e. The fast-path avoided the allocation of a `ColumnID` slice and a `IndexDescriptor_Direction` slice in `FullIndexColumnIDs` when given a unique index. 
In such cases, these slices are already stored on the `IndexDescriptor`. ``` name old time/op new time/op delta KV/Scan/SQL/rows=1-10 94.9µs ±10% 94.9µs ± 8% ~ (p=0.739 n=10+10) name old alloc/op new alloc/op delta KV/Scan/SQL/rows=1-10 20.1kB ± 0% 20.1kB ± 1% ~ (p=0.424 n=10+10) name old allocs/op new allocs/op delta KV/Scan/SQL/rows=1-10 245 ± 0% 241 ± 0% -1.63% (p=0.000 n=10+8) ``` kv: protect Replica's lastToReplica and lastFromReplica fields with raftMu This commit moves the Replica's lastToReplica and lastFromReplica from under the `Replica.mu` mutex to the `Replica.raftMu` mutex. These are strictly Raft-specific pieces of state, so we don't need fine-grained locking around them. As a reward, we don't need to grab the `Replica.mu` exclusively (or at all) when setting the fields in `Store.withReplicaForRequest`. The locking in `setLastReplicaDescriptors` showed up in a mutex profile under a write-heavy workload. It was responsible for **3.44%** of mutex wait time. Grabbing the mutex was probably also slowing down request processing, as the exclusive lock acquisition had to wait for read locks to be dropped. coldata: operate on Nulls value, not reference This commit changes `col.Vec.SetNulls` to accept a `Nulls` struct by value instead of by pointer. This lets us avoid a heap allocation on each call to `Nulls.Or`. Release note: None roachtest: fix silly bug in tlp roachtest I thought that we'd get some basic coverage of roachtests in unit tests at low duration or something, but I guess not. Release note: None sql: support readonly default_with_oids var This is just one less error when importing PGDUMP. Release note (sql change): We now support `default_with_oids`, which only accepts the value false. importccl,jobs: truncate all job system errors In #73303 we truncated the row data in this error message to prevent problems with large row data preventing the job status from being saved. However, truncating the row means that our "experimental_save_rejected" option does not work as expected. This feature previously saved entire rows and now it would have truncated rows in some cases. Now, we only truncate the error when producing the error string. Further, we also add truncation to the job system to prevent other parts of the system from saving too much data to the job system. Release note: None importccl: remove unused argument Release note: None sql/logictest: add diff support to expectation output This adds support for generating a unified diff for expectation mismatches. This can make it easier to spot the differences between two results when the result contains many rows.
For example: ``` ../../sql/logictest/testdata/logic_test/system_namespace:54: SELECT * FROM system.namespace expected: 0 0 defaultdb 50 0 0 postgres 51 0 0 system 1 0 0 test 52 1 0 public 29 1 29 comments 24 1 29 database_role_settings 44 1 29 descriptor 3 1 29 descriptor_id_seq 7 1 29 eventlog 12 1 29 jobs 15 1 29 join_tokens 41 1 29 lease 11 1 29 locations 21 1 29 migrations 40 1 29 namespace 30 1 29 protected_ts_meta 31 1 29 protected_ts_records 32 1 29 rangelog 13 1 29 replication_constraint_stats 25 1 29 replication_critical_localities 2 1 29 replication_stats 27 1 29 reports_meta 28 1 29 role_members 23 1 29 role_options 33 1 29 scheduled_jobs 37 1 29 settings 6 1 29 sql_instances 46 1 29 sqlliveness 39 1 29 statement_bundle_chunks 34 1 29 statement_diagnostics 36 1 29 statement_diagnostics_requests 35 1 29 statement_statistics 42 1 29 table_statistics 20 1 29 transaction_statistics 43 1 29 ui 14 1 29 users 4 1 29 web_sessions 19 1 29 zones 5 50 0 public 29 51 0 public 29 52 0 public 29 but found (query options: "rowsort" -> ignore the following ordering of rows) : 0 0 defaultdb 50 0 0 postgres 51 0 0 system 1 0 0 test 52 1 0 public 29 1 29 comments 24 1 29 database_role_settings 44 1 29 descriptor 3 1 29 descriptor_id_seq 7 1 29 eventlog 12 1 29 jobs 15 1 29 join_tokens 41 1 29 lease 11 1 29 locations 21 1 29 migrations 40 1 29 namespace 30 1 29 protected_ts_meta 31 1 29 protected_ts_records 32 1 29 rangelog 13 1 29 replication_constraint_stats 25 1 29 replication_critical_localities 26 1 29 replication_stats 27 1 29 reports_meta 28 1 29 role_members 23 1 29 role_options 33 1 29 scheduled_jobs 37 1 29 settings 6 1 29 sql_instances 46 1 29 sqlliveness 39 1 29 statement_bundle_chunks 34 1 29 statement_diagnostics 36 1 29 statement_diagnostics_requests 35 1 29 statement_statistics 42 1 29 table_statistics 20 1 29 transaction_statistics 43 1 29 ui 14 1 29 users 4 1 29 web_sessions 19 1 29 zones 5 50 0 public 29 51 0 public 29 52 0 public 29 Diff: --- Expected +++ Actual @@ -20,3 +20,3 @@ 1 29 replication_constraint_stats 25 - 1 29 replication_critical_localities 2 + 1 29 replication_critical_localities 26 1 29 replication_stats 27 logic.go:3340: ../../sql/logictest/testdata/logic_test/system_namespace:100: error while processing ``` The diff output is only added when the `-show-diff` flag is provided because in some cases it can produce more noise than it is worth. The diff library used here is the same one already used by the testify libraries we depend on. Release note: None teamcity-trigger: give `kvserver` package higher stress timeout Closes https://github.com/cockroachdb/cockroach/issues/69519. Release note: None backupccl: allow columns of type array in EXPORT PARQUET Previously, EXPORT PARQUET only supported CRDB relations whose columns were scalars of type int, string, float, or boolean. This change allows columns of type array whose values can be int, string, float, boolean. Following the CRDB ARRAY documentation, a value in an exported array can be NULL as well. Informs: #67710 Release note (sql change): EXPORT PARQUET can export columns of type array backupccl: remove old makeImportSpans Release note: none. backupccl: expand some comments on covering code Release note: none. security: make the bcrypt cost configurable Release note (security update): For context, when configuring passwords for SQL users, if the client presents the password in cleartext via ALTER/CREATE USER/ROLE WITH PASSWORD, CockroachDB is responsible for hashing this password before storing it. 
By default, this hashing uses CockroachDB's bespoke `crdb-bcrypt` algorithm, itself based off the standard Bcrypt algorithm. The cost of this hashing function is now configurable via the new cluster setting `server.user_login.password_hashes.default_cost.crdb_bcrypt`. Its default value is 10, which corresponds to an approximate password check latency of 50-100ms on modern hardware. This value should be increased over time to reflect improvements to CPU performance: the latency should not become so small that it becomes feasible to brute-force passwords via repeated login attempts. Future versions of CockroachDB will likely update the default accordingly. security: make `server.user_login.min_password_length` visible in doc gen This cluster setting was meant to be exported for visibility in auto-generated docs (we've documented it before). This was an oversight. Release note: None authors: add Fenil Patel to authors Release note: None bench: force the index join in BenchmarkIndexJoin Without using the index hint, we now choose to perform the full scan over the primary index and put the filter on top of that rather than performing a limited scan of the secondary followed by an index join. I confirmed that this is the case on 21.1 and 21.2 binaries, so I'll backport to the latter (in order to make the comparison between releases sane). Release note: None dev: a few improvements to the script * Add `set -e` so if the `dev` build fails, it doesn't try to run the binary anyway. * Add a way to force recompilation of `dev` (useful for developers). Release note: None changefeedccl: Shutdown tenant before main server. Shut down the tenant server before stopping the main test server. This eliminates some of the error messages we see in the test output when the tenant attempts to connect to some ranges which are no longer accessible. Release note: None importccl: fix import pgdump target column bug Previously, if a COPY FROM statement had fewer columns than the CREATE TABLE schema defined in the dump file, we would get a nil pointer exception. This is because we were not filling the non-targeted columns with a NULL datum. This change fixes that and aligns behaviour with how INSERT handles non-targeted columns. Release note (bug fix): IMPORT TABLE ... PGDUMP with a COPY FROM statement in the dump file that has fewer target columns than the CREATE TABLE schema definition would result in a nil pointer exception. roachtest: add new passing tests for pgjdbc nightly Release note: None go.mod: fix ordering The first `require` is for direct dependencies. The second one is for indirect dependencies. There's no need for a third one. Release note: None Makefile: specify that code needs to be generated for `roach{test,prod}` Release note: None setting: clean up slot indexes The internal setting code passes around slot indexes as bare integers, and there are confusingly two variants: one is 1-indexed, another is 0-indexed. This change makes all slot indexes 0-indexed and adds a `slotIdx` type. Note: the 1-indexed choice was perhaps useful at the time to help find code paths where the slot index is not initialized, but now the slot index is set along with other mandatory fields by `init()`. Release note: None setting: prevent use of SystemOnly settings from tenant code This change implements the `system-only` semantics in the RFC (#73349). All SystemOnly setting values are now invisible from tenant code.
If tenant code attempts to get or set a SystemOnly setting by handle, it results in panics in test builds; in non-test builds, these settings always report the default value. Release note (sql change): System-only cluster settings are no longer visible for non-system tenants. build: fix Pebble nightly benchmarks The Pebble nightly benchmarks stopped running due to a build error stemming from the removal of the generated code from the codebase. Release note: None scpb: move Node to screl, introduce scpb.TargetState This refactoring commit moves scpb.Node to screl and groups the targets into a new scpb.TargetState. Separating the quasi-constant target elements and statuses from the elements' intermediate statuses ultimately makes for cleaner code in the declarative schema changer. Release note: None scplan: make child packages internal, including scgraph and scgraphviz This commit moves scgraph and scgraphviz under scplan, and makes all its child packages internal. Release note: None authors: add gtr to authors Release note: None sql: format statements for the UI Previously, statements sent to the UI were not formatted, making them difficult to read. With this change, statements are now formatted using a builtin function that prettifies statements (using existing pretty-printing logic). Statements are formatted using pre-determined pretty-printing configuration options mentioned in this issue: #71544. Resolves: #71544 Release note (sql change): statements are now formatted prior to being sent to the UI; this is done using a new builtin function that formats statements. ui: added formatting to statements on the details pages Previously, statements displayed on the statement/transaction/index details pages were not formatted. Formatting was added to allow for better readability of statements on these detail pages. Requested statements are now formatted on the server using the existing pretty-printing logic. Statements returned from the statement handler are now formatted. Formatting is done via a new builtin function 'prettify_statement', a wrapper function over the existing pretty-printing logic. Resolves: #71544 Release note (ui change): added formatting to statements on the statement, transaction and index details pages. roachpb: InternalServer API for tenant settings This commit introduces the API that the tenants will use to obtain and listen for updates to tenant setting overrides. The API was designed to allow for maximum flexibility on the server side so that it can be implemented as a range feed without any extra state (if necessary). Release note: None schemachanger: fully qualify object names inside event log entries Previously, the original SQL was displayed inside the declarative schema changer's event logs, which was missing both redactions and full name resolution. This was inadequate because the existing schema changer always fully resolved missing names within its entries. To address this, this patch adds new metadata that contains the fully resolved and redacted text. Release note: None schemachanger: fully resolve names inside the AST for event logs and errors Previously, the declarative schema changer left the AST untouched when resolving names. This was inadequate because both event log entry generation and some generated error messages expect the fully resolved names inside the AST. To address this, this patch adds support for copying and altering the AST, including support for adding annotations.
Additionally, all resolved names are now propagated back into the AST and extra validation is introduced to make sure that no unresolved names are left. Release note: None schemachanger: add statement tag to event log entries Previously, the tag field inside the event log entries generated by the declarative schema changer were empty. This was inadequate because the existing schema changer always populated these fields. To address this, this patch will now populate the statement tag field inside the element metadata and into event log entries. Release note: None rfc: token-based authentication for SQL session revival Release note: None vendor: upgrade cockroachdb/apd to v3 This commit picks up the following changes to `cockroachdb/apd`: - https://github.com/cockroachdb/apd/pull/103 - https://github.com/cockroachdb/apd/pull/104 - https://github.com/cockroachdb/apd/pull/107 - https://github.com/cockroachdb/apd/pull/108 - https://github.com/cockroachdb/apd/pull/109 - https://github.com/cockroachdb/apd/pull/110 - https://github.com/cockroachdb/apd/pull/111 Release note (performance improvement): The memory representation of DECIMAL datums has been optimized to save space, avoid heap allocations, and eliminate indirection. This increases the speed of DECIMAL arithmetic and aggregation by up to 20% on large data sets. sql/sem: remove EvalContext.getTmpDec `apd.Decimal` can now be entirely stack allocated during arithmetic, so there's no longer any need for this. With https://github.com/cockroachdb/apd/pull/104, this does not introduce any new heap allocations: ``` ➜ (cd pkg/sql/sem/tree && goescape . | grep moved | wc -l) 328 ``` scripts: add sgrep @aliher1911 showed me this handy awk trick to filter goroutines [here]. I find myself reaching for this frequently, but it always takes me a minute or two to get it right. This helper will make it a lot easier and will perhaps enable many others to reap the benefits as well. [here]: https://github.com/cockroachdb/cockroach/pull/66761#pullrequestreview-691529838 Release note: None sqlproxyccl: better test output This moves the logging output to files and enables inspection of the authentication results in logs crdb-side. Release note: None storage/metamorphic: update clearRangeOp to not conflict with in-flight writes This change updates clearRangeOp to only run on key spans that do not have any transactional writes in-progress. This more closely matches behaviour in KV and avoids cases where we'd trample on intents and other in-flight keys. Also makes a similar adjustment to mvccClearTimeRangeOp. Fixes #72476. Speculatively. Reproduction was nearly impossible. Release note: None. Revert "roachpb: change `Lease.String()`/`SafeFormat()` to support refs." This reverts commit 33fe9d15b2660cc40d255a3d448d88bb35c2c39d. storage,roachpb: Add Key field to WriteTooOldError Currently, WriteTooOldErrors that happen as part of a ranged put operation (eg. MVCCClearTimeRange) can be very opaque, as we don't know what key we tried to "write too old" on. This change addresses that by adding a Key field to WriteTooOldError to store one of the keys the error pertains to. Informs #72476. Release note: None. dev: enumerate explicit list of all generated `.go` files when hoisting We should be able to `find _bazel/bin/go_path -type f -name '*.go'` and list the generated files that way, but bazelbuild/rules_go#3041 is in the way and I can't figure out how to resolve it. 
Instead we have to do things the hard way: run `bazel aquery` and parse out the paths to all the generated files. In this way we avoid accidentally hoisting out stale symlinks from a previous build. If bazelbuild/rules_go#3041 is resolved upstream then this change can be reverted. Release note: None sql: public schema long running migration Release note: None sql: insert missing public schema namespace entry When restoring a database, a namespace entry for the public schema was not created. Release note: None authors: add msirek to authors Release note: None sql: Add hints to CREATE/ALTER table errors for MR Add some hints when trying to create (or alter to) a multi-region table which indicate that if the database is not multi-region enabled, it should be converted to a multi-region enabled database via a "ALTER DATABASE ... SET PRIMARY REGION " statement. Release note: None catalog: add Index*Columns methods to table descriptor These methods return the columns referenced in indexes as []catalog.Column slices. Release note: None catalog: add Index*ColumnDirections methods to table descriptor This complements the previous commit which added Index*Column methods to catalog.TableDescriptor. Release note: None catalog: remove FullIndexColumnIDs Calls to this function can now be replaced with recently-added IndexFullColumns and IndexFullColumnDirections method calls on the table descriptor. Release note: None tabledesc: improve memory-efficiency of Index*Column* methods This commit removes IndexCompositeColumns (which wasn't actually used and is unlikely to ever be) and generates the indexColumnCache with less memory allocations. The current scheme is causing a statistically-significant performance regression. Release note: None sql: allow the 1PC optimization in some cases in the extended protocol A previous commit (7e2cbf51869fc326974a5665db80da8b29422631) fixed our pgwire implementation so that it does not auto-commit a statement executed in the extended protocol until a Sync message is received. That change also had the undesired effect of disabling the 1PC ("insert fast path") optimization that is critical for write-heavy workloads. With this current commit, the 1PC optimization is allowed again, as long as the statement execution is immediately followed by a Sync message. This still has the correct bugfix semantics, but allows the optimization for the common case of how the extended protocol is used. No release note since this only affects unreleased versions. Release note: None execgen: remove temporary decimals from the helper This commit removes two temporary decimals from `execgen.OverloadHelper` since after the upgrade of the decimal library it can use stack-allocated temporary objects without them escaping to the heap. Release note: None colexec: batch allocate datums for projectTupleOp This has a profound impact on the amount of garbage generated by the delivery query in TPC-C. ``` name old time/op new time/op delta TPCC/mix=delivery=1-16 38.0ms ± 2% 35.8ms ± 1% -5.76% (p=0.000 n=9+8) name old alloc/op new alloc/op delta TPCC/mix=delivery=1-16 8.17MB ± 1% 7.97MB ± 1% -2.36% (p=0.000 n=9+10) name old allocs/op new allocs/op delta TPCC/mix=delivery=1-16 80.2k ± 0% 20.3k ± 1% -74.65% (p=0.000 n=10+9) ``` Leveraged https://github.com/cockroachdb/cockroach/pull/74443 to find this. 
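The underlying technique is the usual one of amortizing many small allocations into a few larger ones. As a rough illustration only (the type and method names below are hypothetical, not the actual colexec code), a minimal Go sketch of such a batch allocator might look like:

```go
package main

import "fmt"

// datum is a stand-in for a single SQL value; the real code deals in tree.Datum.
type datum struct{ val int64 }

// datumAlloc hands out datums carved from one larger allocation instead of
// allocating every datum individually, amortizing the allocation cost.
type datumAlloc struct {
	buf       []datum
	batchSize int
}

// newDatum returns a pointer into the current batch, allocating a fresh
// batch only when the previous one has been used up.
func (a *datumAlloc) newDatum(v int64) *datum {
	if len(a.buf) == 0 {
		a.buf = make([]datum, a.batchSize) // one allocation per batchSize datums
	}
	d := &a.buf[0]
	a.buf = a.buf[1:]
	d.val = v
	return d
}

func main() {
	alloc := datumAlloc{batchSize: 1024}
	for i := int64(0); i < 3; i++ {
		fmt.Println(alloc.newDatum(i).val)
	}
}
```

Handing out values from a single chunk removes the per-tuple allocations from the hot loop, which is consistent with the allocs/op drop shown in the benchmark above.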
Release note: None kvserver/loqrecovery: check full key coverage in quorum recovery Previously, when doing unsafe replica recovery, if some ranges were missing or represented by stale replicas that had been split or merged, recovery would change the cluster to an inconsistent state with gaps or overlaps in the keyspace. This change adds checks for range completeness as well as a preference for replicas with a higher range applied index. Release note: None sql: fix enum hydration in distsql expression evaluation Fixes: #74442 Previously, in some circumstances we could fail to hydrate enum types used in join predicate expressions and possibly other situations. Now types used in ExprHelper are always hydrated during the Init phase when a distsql type resolver is being used. Also add a test case for the lookup semi join repro case. Release note (bug fix): Fixed panics that were possible in some distributed queries using enums in join predicates. backupccl: add libgeos to BUILD.bazel This is required when using randgen so that libgeos can be initialized when a geometric type is generated. Fixes #73895 Release note: None sql: refactor pg_builtin to use actual grant options Refactor builtins.priv -> privilege.Privilege. Replace privilege.Kind with privilege.Privilege in functions that need access to privilege.Privilege.GrantOption. Release note: None sql: refactor pg_builtin to use actual grant options The builtins has_table_privilege, has_column_privilege, has_any_column_privilege now use privileges.Priv.GrantOption instead of privileges.Kind.GRANT. Release note: None dev: introduce common benchmarking flags Specifically: --bench-mem (to print out memory allocations); --bench-time (to control how long each benchmark is run for); --count (how many times to run it, also added to `dev test`); -v and --show-logs (similar to `dev test`) We also add support for args-after-double-dash-are-for-bazel within `dev bench`. This commit is light on testing (read: there isn't any), so it doesn't bump DEV_VERSION to roll it out to everyone just yet. Release note: None execgen: skip encoding/decoding JSON when hashing it Previously, in order to hash a JSON object we would encode and decode it (due to the behavior of `coldata.JSONs.Get` and the setup of the execgen). This commit refactors how the hash functions are handled in the execgen, which allows us to peek inside of `coldata.Bytes` (that is under the `coldata.JSONs`) in order to get direct access to the underlying `[]byte`. This should be a minor performance improvement but also allows us to remove the need for the overload helper in the hashing context. It is possible (although I'm not certain) that the hash computation is now different, so this commit bumps the DistSQL versions to be safe. Release note: None colexechash: fix BCEs in some cases Previously, we were not getting (and not asserting) some of the bounds check eliminations in the `rehash` function because we were accessing the `.Sliceable` property in the wrong context. This is now fixed (by accessing it via `.Global`), and the assertions are now added correctly. Additionally, this commit pulls the call to the cancel checker out of the templated block to reduce the amount of code duplication. Release note: None execinfra: prefer leased table descriptors This commit makes better use of the table descriptor protos in the DistSQL specs by first checking if they've already been leased. If so, then we use those instead of re-generating catalog.TableDescriptor instances.
This has a statistically-significant impact on memory allocations, as illustrated with this microbenchmark which I ran on my development machine: name old time/op new time/op delta KV/Scan/SQL/rows=1-16 190µs ± 7% 184µs ± 4% -3.60% (p=0.016 n=10+8) KV/Scan/SQL/rows=10-16 196µs ± 4% 198µs ±12% ~ (p=0.762 n=8+10) name old alloc/op new alloc/op delta KV/Scan/SQL/rows=1-16 19.5kB ± 1% 17.4kB ± 1% -11.12% (p=0.000 n=9+10) KV/Scan/SQL/rows=10-16 21.0kB ± 1% 18.9kB ± 1% -10.20% (p=0.000 n=10+10) name old allocs/op new allocs/op delta KV/Scan/SQL/rows=1-16 222 ± 0% 210 ± 1% -5.59% (p=0.000 n=7+10) KV/Scan/SQL/rows=10-16 256 ± 0% 244 ± 0% -4.84% (p=0.000 n=10+7) This change opens us to the possibility of no longer shipping the whole table descriptor proto in the DistSQL spec. Release note: None sql: add missing error check in GetHydratedZoneConfigForNamedZone Closes #74606 Release note: None sql: rename pg_cast_provolatile_dump.csv to pg_cast_dump.csv Release note: None sql: remove castInfo `castMap` now contains all volatility information and `castInfo` is no longer needed. `castInfo` has been removed. For backwards compatibility, some invalid casts between VARBIT and integer types have been added to `castMap`. These casts have unexpected behavior before and after this commit. They are not supported by Postgres. They should be disallowed in the future. See #74577 and 74580 Casts between tuple and string types are now dynamically handled in `lookupCast` support casts with named record types that have different OIDs than `oid.T_record`. Casts from OID and REG* types to INT2 are now allowed to maintain backward compatibility. Release note: None sql: update pg_cast_dump.csv This commit updates the `pg_cast_dump.csv` file with new rows for casts from the JSONB type (OID 3802). It also adds the Postgres version as a column to annotate the version of Postgres that the CSV was generated from. This is important because the `pg_cast` table can change from version to version. This CSV was generated from Postgres version 13.5. Release note: None sql: consistently order pg_cast_dump.csv Release note: None sql: check cast contexts against Postgres's pg_cast table This commit includes the `pg_cast.castcontext` column in `pg_cast_dump.csv` and uses it to validate the `maxContext` field in `castMap`. Note that Postgres's `pg_cast` table does not include all possible casts, such as automatic I/O conversions, so this test is not comprehensive. Release note: None ci: bazelize nightly pebble ycsb, write-throughput benchmarks We still have to take care of the metamorphic nightly. Part of #67335. Release note: None authors: add bardin to authors Release note: None opt: fix corner case with lookup joins and provided orderings This commit fixes a case where the lookup join was passing through a provided ordering with unnecessary columns. This was caused by imperfect FDs at the join level such that the ordering cannot be simplified at the join level but it can be simplified at the level of its input. Note that the case causes an internal error in test builds but there are no known examples of user-visible problems in non-test builds (hence no release note). Fixes #73968. Release note: None rpc: fix span use-after-Finish in internalClientAdapter The internalClientAdapter performs some local "RPCs" by directly calling the server method, without going through gRPC. The streaming RPC calls are made on a different goroutine, and these goroutines were using the callers tracing span. 
These goroutines could outlive the caller's span, resulting in a use-after-Finish crash. This patch fixes them by creating dedicated RPC spans, mimicking what our gRPC interceptors do. Fixes #74326 Release note: None vendor: Bump pebble to 3d0ff924d13a3d5fdf6e56a391c5c178c18ff196 Changes pulled in: ``` 3d0ff924d13a3d5fdf6e56a391c5c178c18ff196 *: Add trySeekUsingNext optimization to SeekGE 0c503048eb0365981929177c30178add8a56ae3e sstable: add (*sstable.Writer).RangeKey{Set,Unset,Delete} methods fe52b49cc28df62dce9b00c382a5ce217936be56 tool/logs: aggregate compaction logs by node and store ID 8ab4358bc59dfa62e5e34e4b0e5ce81a68f5fe91 sstable: return err from `(BlockPropertyCollector).FinishTable` 91c18ef0ee999980c2869d11e5ce468410acbe8d internal/keyspan: add FragmentIterator interface 953fdb078ff0585489206ae96e1d80ca9f6f90c7 internal/keyspan: implement SetBounds on Iter aa376a819bf67cd6766ee827feed4bf0bd508f1f tool: add compaction log event aggregation tool ``` Release note: None. storage: Use optimized SeekGE in CheckSSTConflicts This nearly reverts #73514 by moving back to calling SeekGE on the engine to skip past any empty spans on either the engine or the SSTable. This is the more optimal approach on average, and given optimizations in cockroachdb/pebble#1412 which this change depends on, it also ends up performing better than a SeekPrefixGE-driven appraoch and the pre-#73514 approach. Improvement when running BenchmarkCheckSSTConflicts against the pre-#73514 revision (vs. this one): ``` name old time/op new time/op delta CheckSSTConflicts/keys=1000/versions=8/sstKeys=1000/overlap=false-24 72.6µs ±11% 66.3µs ± 1% -8.67% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=8/sstKeys=1000/overlap=true-24 12.2ms ± 1% 1.7ms ± 1% -86.41% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=8/sstKeys=10000/overlap=false-24 69.8µs ± 2% 67.4µs ± 1% -3.48% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=8/sstKeys=10000/overlap=true-24 13.3ms ± 3% 2.8ms ± 1% -78.97% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=64/sstKeys=1000/overlap=false-24 75.8µs ± 3% 63.8µs ± 1% -15.86% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=64/sstKeys=1000/overlap=true-24 13.0ms ± 1% 1.9ms ± 1% -85.11% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=64/sstKeys=10000/overlap=false-24 69.8µs ±11% 64.6µs ± 1% -7.45% (p=0.008 n=5+5) CheckSSTConflicts/keys=1000/versions=64/sstKeys=10000/overlap=true-24 14.8ms ± 9% 3.1ms ± 2% -79.05% (p=0.008 n=5+5) CheckSSTConflicts/keys=10000/versions=8/sstKeys=1000/overlap=false-24 66.1µs ± 2% 63.7µs ± 1% -3.65% (p=0.008 n=5+5) CheckSSTConflicts/keys=10000/versions=8/sstKeys=1000/overlap=true-24 14.2ms ± 9% 1.9ms ± 1% -86.55% (p=0.008 n=5+5) CheckSSTConflicts/keys=10000/versions=8/sstKeys=10000/overlap=false-24 72.3µs ±10% 64.5µs ± 0% -10.77% (p=0.008 n=5+5) CheckSSTConflicts/keys=10000/versions=8/sstKeys=10000/overlap=true-24 122ms ± 2% 17ms ± 1% -86.03% (p=0.008 n=5+5) CheckSSTConflicts/keys=10000/versions=64/sstKeys=1000/overlap=false-24 69.0µs ± 9% 62.4µs ± 1% -9.57% (p=0.032 n=5+5) CheckSSTConflicts/keys=10000/versions=64/sstKeys=1000/overlap=true-24 14.0ms ± 1% 2.3ms ± 2% -83.46% (p=0.016 n=4+5) CheckSSTConflicts/keys=10000/versions=64/sstKeys=10000/overlap=false-24 69.4µs ± 9% 62.7µs ± 1% -9.63% (p=0.016 n=5+5) CheckSSTConflicts/keys=10000/versions=64/sstKeys=10000/overlap=true-24 140ms ± 5% 26ms ± 1% -81.70% (p=0.008 n=5+5) CheckSSTConflicts/keys=100000/versions=8/sstKeys=1000/overlap=false-24 69.2µs ±10% 62.5µs ± 1% -9.66% (p=0.008 n=5+5) 
CheckSSTConflicts/keys=100000/versions=8/sstKeys=1000/overlap=true-24 15.3ms ±11% 2.3ms ± 1% -85.21% (p=0.008 n=5+5) CheckSSTConflicts/keys=100000/versions=8/sstKeys=10000/overlap=false-24 69.7µs ±12% 63.6µs ± 1% ~ (p=0.095 n=5+5) CheckSSTConflicts/keys=100000/versions=8/sstKeys=10000/overlap=true-24 148ms ± 6% 28ms ± 2% -80.90% (p=0.008 n=5+5) CheckSSTConflicts/keys=100000/versions=64/sstKeys=1000/overlap=false-24 67.1µs ±10% 61.1µs ± 2% -8.93% (p=0.016 n=5+5) CheckSSTConflicts/keys=100000/versions=64/sstKeys=1000/overlap=true-24 14.4ms ± 2% 2.5ms ± 5% -82.45% (p=0.016 n=4+5) CheckSSTConflicts/keys=100000/versions=64/sstKeys=10000/overlap=false-24 68.9µs ±21% 62.2µs ± 1% -9.76% (p=0.008 n=5+5) CheckSSTConflicts/keys=100000/versions=64/sstKeys=10000/overlap=true-24 204ms ±14% 42ms ± 5% -79.44% (p=0.008 n=5+5) ``` Fixes #66410. Release note: None. execinfrapb: add a helper for index joins based on the JoinReaderSpec Release note: None rowexec: refactor the joinReader to not exceed the batch size The joinReader operates by buffering the input rows until a certain size limit (which is dependent on the strategy). Previously, the buffering would stop right after the size limit is reached or exceeded, and this commit refactors the code to not exceed the limit except in a case of a single large row. This is what we already do for vectorized index joins. Release note: None sql,kv: introduce Streamer API and use it for index joins in some cases This commit introduces the Streamer API (see https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20210617_index_lookups_memory_limits.md) as well as its implementation for the simplest case - when requests are unique and can be served in any order. It additionally hooks up the implementation to be used by the index joins in both execution engines. There are three main pieces that this commit adds: 1. the `Streamer` struct itself. It is largely the same as described in the RFC. Some notable changes are: - `Cancel` is renamed to `Close` and is made blocking to ensure that all goroutines kicked off by the `Streamer` exit before `Close` returns. - `Shrink` has been removed from the `budget` struct (see below for more details). - furthermore, the `budget` interface has been unexported and the `streamer` has been tightly coupled with the `budget`'s implementation. - the TODO about collecting DistSQL metadata is removed because we are collecting the LeafTxnFinalState already when using the LeafTxn. 2. the limited `Streamer` implementation - only `OutOfOrder` mode is supported when the requests are unique. Notably, buffering and caching of the results is not implemented yet. 3. `TxnKVStreamer` component that sits below the SQL fetchers, uses the `Streamer`, and is an adapter from `BatchResponse`s to key/value pairs that fetchers understand. Although not used at the moment, `TxnKVStreamer` is written under the assumption that a single result can satisfy multiple requests. The memory budget of the `Streamer` is utilized lazily. The RFC was calling for making a large reservation upfront and then shrinking the budget if we see that we don't need that large reservation; however, while working on this I realized that lazy reservations are a better fit for this. The `Streamer` can reserve up to the specified limit (determined by `distsql_workmem` variable) against the root monitor (in the degenerate case of a single large row more memory will be reserved). 
The reservation then never shrinks under the assumption that if the reservation has gotten large, it means it was needed for higher concurrency (or large responses), and it is likely to be needed for the same reasons in the future. The layout of the main components of the `Streamer` implementation: - in `Enqueue` we have logic similar to what DistSender does in order to split each request (that might span multiple ranges) into single-range requests. Those sub-requests are batched together to be evaluated by a single `BatchRequest`. - `workerCoordinator.mainLoop` is responsible for taking single-range batches, estimating the corresponding response size, and issuing requests to be evaluated in parallel while adhering to the provided memory budget. - `workerCoordinator.performRequestAsync` is responsible for populating the `BatchRequest` and then processing the results while updating the memory budget. Current known limitations that will be addressed in the follow-up work: - at the moment a single row can be split across multiple BatchResponses when the TargetBytes limit is reached and the table has multiple column families; therefore, we use the streamer only for single column family cases. We will expand the KV API shortly to not split the rows in multiple column family cases. - manual refresh of spans when `ReadWithinUncertaintyIntervalError` is encountered by a single streamer in a single flow is not implemented. It is an optimization that is considered a must for the final implementation in order to not regress on simple cases in terms of retriable errors. This will be implemented shortly as a follow-up. - I'm thinking that eventually we probably want to disable the batch splitting done by the DistSender to eliminate unnecessary blocking when the streamer's splitting was incorrect. This would give us some performance improvements in the face of range boundary changes, but it doesn't seem important enough for the initial implementation. Release note: None sql: check equivalent constraint when creating hash index Fixes #68031 Previously, we only tried to create the constraint for the shard column if it was newly created. We checked for a duplicate constraint on the shard column during `Alter Primary Key` and `Create Index`; however, the check was simply a name check. This PR adds logic to check for an equivalent constraint by checking if the formatted expression string is the same. With this logic we can try to create the constraint no matter if a shard column is newly created or not. With this fix, we also don't need to expose the constraint through the `SHOW CREATE TABLE` result since we make sure the constraint is created or skipped if one already exists. Release note (sql change): Before this change, the check constraint on the shard column used by a hash sharded index was printed in the corresponding `SHOW CREATE TABLE`. The constraint had been shown because cockroach lacked logic to ensure that shard columns which are part of hash sharded indexes always have the check constraint which the optimizer relies on to achieve properly optimized plans on hash sharded indexes. We no longer display this constraint in `SHOW CREATE TABLE` as it is now implied by the `USING HASH` clause on the relevant index. colexec: clean up the usage of the binary overload helper Previously, for projection and selection operators we would always pass in `execgen.OverloadHelper`.
Up until recently it served several purposes, but now it is only used in order to pass down the binary function (as well as the eval context) for the cases when we fallback to the row-by-row computation. This commit takes advantage of this observation and cleans up the situation: now the helper is only passed when it is needed which allows us to remove a lot of redundant code. Additionally, the helper itself has been renamed from `OverloadHelper` to `BinaryOverloadHelper`. Release note: None build: Add PATH to .bazelrc for dev builds. Release note: none opt: intern provided physical properties This commit changes the provided physical properties to be referenced with a pointer inside bestProps, in preparation for adding more fields to physical.Provided. Release note: None opt: add Distribution physical property and Distribute enforcer This commit adds a new physical property called "Distribution" to expressions in the optimizer. Currently, Distribution is just the list of regions that are touched by the expression. This commit also adds a new enforcer called "Distribute", which enforces a particular Distribution on its input. This is similar to the way Sort enforces a particular Ordering. Currently, Distribute is only used to enforce that the results of a query end up in the same region as the gateway node. The Distribution property and Distribute enforcer will enable us to perform better costing of query plans in future commits. This commit adds a tiny cost for the Distribute enforcer, but otherwise does not yet take advantage of distribution for costing. In the future, we can use Distribution to better cost distributed joins and aggregations, and accurately cost the Distribute enforcer so as to avoid scanning data in remote regions if possible. This commit represents a departure from the approach taken in the earlier prototype in #43831. Instead of using a "neighborhood" abstraction to represent arbitrary collections of nodes, this commit sticks to the existing abstractions of region and locality. If we need a new abstration in the future, we should be able to simply modify the representation of Distribution. Additionally, this commit does not make an attempt to represent exactly how data is partitioned across regions (e.g., which column is the partitioning key, whether the data is hash or range partitioned, etc.); it only tracks *which* regions contain data. This greatly simplifies the implementation, and I don't think it will significantly reduce the accuracy of costing. Informs #47226 Release note: None cli: add mt cert create-tenant-signing command This key/cert will be used to generate session revival tokens. No release note since this is only intended to be used internally. Release note: None sql/sem/catid: make a low-level package which defines ID types I'm not sure where exactly in the tree this belongs. Release note: None sql/schemachanger/scpb: adopt catid Release note: None sql/catalog/catpb: add package below scpb and descpb This doesn't go all the way to being principled about what is in catpb and what is in descpb, but it pulls the relevant symbols referenced in scpb down into the new package. Release note: None kvserver: fix bug causing spurious rebalance attempts by the store rebalancer When rebalancing a range, the store rebalancer also tries to move the lease to the voter with the lowest QPS. However, previously, the store rebalancer was doing this even when it found no better replacement targets for the existing stores. 
This meant that we were redundantly transferring leases over to the coldest voter, even when we weren't rebalancing the range. This was likely contributing to the thrashing observed in https://github.com/cockroachdb/cockroach/issues/69817. Release note (bug fix): A bug that could previously cause redundant lease transfers has now been fixed. schemachanger: delete comments when dropping schemas Previously, the declarative schema changer did not clean up comments for schema objects when dropping them. This was inadequate because the system.comment table would have rows for dropped schema objects left over when using the declarative schema changer versus the legacy schema changer. To address this, this patch will remove rows from the system.comment table for schema objects that are dropped. Release note: None sql: adopt CommentUpdater from declarative schema changer. Previously, there was duplicate code for adding/deleting comments on schema objects inside the legacy schema changer. So, each comment type would have similar code for setting up upsert/delete statements. This patch adopts the CommentUpdater interface in the legacy schema changer, so that logic to update/delete comments can be shared with the declarative schema changer, and to simplify code in the legacy schema changer. Release note: None kvserver: don't use `ClearRange` point deletes with estimated MVCC stats `ClearRange` avoids dropping a Pebble range tombstone if the amount of data that's deleted is small (<=512 KB), instead dropping point deletions. It uses MVCC statistics to determine this. However, when clearing an entire range, it will rely on the existing range MVCC stats rather than computing them. These range statistics can be highly inaccurate -- in some cases so inaccurate that they even become negative. This in turn can cause `ClearRange` to submit a huge write batch, which gets rejected by Raft with `command too large`. This patch avoids dropping point deletes if the statistics are estimated (which is only the case when clearing an entire range). Alternatively, it could do a full stats recomputation in this case, but entire range deletions seem likely to be large and/or rare enough that dropping a range tombstone is fine. Release note (bug fix): Fixed a bug where deleting data via schema changes (e.g. when dropping an index or table) could fail with a "command too large" error. backup: add memory monitor to manifest loading Release note: none. sql: add assignment casts for UPSERTs Assignment casts are now added to query plans for upserts, including `UPSERT`, `INSERT .. ON CONFLICT DO NOTHING`, and `INSERT .. ON CONFLICT DO UPDATE ..` statements. Assignment casts are a more general form of the logic for rounding decimal values, so the use of `round_decimal_values` in mutations is no longer needed. This logic has been removed. Fixes #67083 There is no release note because the behavior of upserts should not change with this commit. Release note: None sql: add logic tests for assignment casts of ON UPDATE expressions Release note: None sql: schema changer not to validate shard column constraint Fixes #67613 The shard column constraint is created internally and should be automatically upheld. So there is no need to verify it when backfilling hash sharded indexes. Release note: None. schemachanger: fix error messages for drop dependencies. 1) When a view depends on another view, the message text generated was incorrect. 2) When a sequence is OWNED BY a table, it could be dropped even if there were other dependencies.
3) When RESTRICT was not specified, DROP DATABASE would generate the wrong error. 4) If the database name is empty, we would generate the wrong error. Release note: None sql: modify message for drop schema privilege errors Previously, the legacy schema changer used a less clear message for when a DROP SCHEMA failed due to a privilege error. This patch improves the generated message to be clearer by using the message from the declarative schema changer. The new message will have the form "must be owner of schema ". Release note: None schemachanger: avoid leased descriptors for event logging Previously, when fully resolving descriptor names, we would fetch leased descriptors which could cause the schema change transaction to get pushed out. This was inadequate because in certain scenarios we would perpetually retry transactions, since attempts to generate event log entries acquire leases, pushing out the schema change transaction. To address this, this patch intentionally avoids leasing descriptors for event logging. Release note: None build: put regular file at bazel-out by default Release note: none. bulk,backup,import: add Write-at-now settings for RESTORE and IMPORT This adds a new argument to BulkAdderOptions and MakeSSTBatcher to control the WriteAtBatchTS field in the AddSSTableRequests it sends. This flag is then optionally set in RESTORE and IMPORT based on the new hidden cluster settings 'bulkio.restore_at_current_time.enabled' and 'bulkio.import_at_current_time.enabled' respectively. These settings default to off for now, for testing usage only, as setting this flag is known to _significantly_ reduce the performance of those jobs until further work is done. Release note: none. sql/row: remove an allocation for Get responses ``` name old time/op new time/op delta IndexJoin/Cockroach-24 8.02ms ± 9% 7.92ms ± 2% ~ (p=1.000 n=9+10) name old alloc/op new alloc/op delta IndexJoin/Cockroach-24 1.68MB ± 1% 1.62MB ± 1% -3.83% (p=0.000 n=10+9) name old allocs/op new allocs/op delta IndexJoin/Cockroach-24 11.1k ± 1% 10.1k ± 0% -8.85% (p=0.000 n=9+9) ``` Release note: None kvstreamer: refactor memory tokens to reduce allocations ``` name old time/op new time/op delta IndexJoin/Cockroach-24 7.72ms ± 7% 7.51ms ± 3% -2.75% (p=0.001 n=10+9) name old alloc/op new alloc/op delta IndexJoin/Cockroach-24 1.61MB ± 1% 1.60MB ± 1% -0.91% (p=0.001 n=10+9) name old allocs/op new allocs/op delta IndexJoin/Cockroach-24 10.1k ± 1% 9.1k ± 1% -10.24% (p=0.000 n=10+10) ``` Release note: None spanconfig: drop 'experimental_' suffix from settings We're going to start to use this infrastructure for realsies soon. Release note: None spanconfig/testcluster: plumb in testing knobs for tenants We were previously using an empty one with no control at the caller to override specific values. This comes in handy when looking to control knobs for all tenants in these tests. Release note: None migrationsccl: disable reconciliation job creation in tests In a future commit we'll enable the span configs infrastructure by default for all crdb unit tests. Doing so will surface the need to have the reconciliation job disabled for the specific migration tests that assert on the contents of `system.span_configurations` (also written to by the reconciliation job/reconciler). Release note: None spanconfig/sqltranslator: deflake datadriven test This is a follow-up to #73531; there we forgot to update package tests to also use consistent reads when looking up descriptors.
Looking at the surrounding commentary in these datadriven files and comparing against the actual results, there was a mismatch (now no longer so). Release note: None sql: future-proof TestTruncatePreservesSplitPoints This test previously relied on range split decisions to happen near-instantaneously (especially for the single node variant). This was a fair assumption with the gossiped SystemConfigSpan, but is no longer true with the span configs infrastructure where (i) updated descriptors/zone configs make their way to `system.span_configurations` asynchronously, and (ii) KV learns about `system.span_configurations` updates asynchronously. We update the test to be agnostic to either subsystem (tl;dr - throw in a SucceedsSoon block at the right places). Release note: None sql: future-proof TestScatterResponse This is Yet Another Test that made timing assumptions on how instantaneously range split decisions appear, assumptions that no longer hold under the span configs infrastructure. Adding compatibility is a matter of waiting for splits to appear instead of just expecting it. Release note: None kvserver: future-proof TestElectionAfterRestart This is Yet Another Test that made timing assumptions on how instantaneously range split decisions appear, assumptions that don't hold under the span configs infrastructure. Adding compatibility is a matter of waiting for splits to appear instead of only expecting it. Release note: None multiregionccl: future-proof TestEnsureLocalReadsOnGlobalTables This is Yet Another Test that made timing assumptions on how instantaneously range split decisions appear, assumptions that don't hold under the span configs infrastructure. Adding compatibility is a matter of waiting for splits to appear instead of only expecting it. Release note: None server: future-proof TestAdminAPIDatabaseDetails This is Yet Another Test that made timing assumptions on how instantaneously range split decisions appear, assumptions that don't hold under the span configs infrastructure. Adding compatibility is a matter of waiting for splits to appear instead of only expecting it. Release note: None changefeedccl: future-proof TestChangefeedProtectedTimestamps The low gc.ttlseconds in this test that applies to system.{descriptor,zones}, when run with span configs enabled (in a future commit), runs into errors introduced in #73086. The span configs infrastructure makes use of rangefeeds against these tables within the spanconfig.SQLWatcher process. These rangefeeds error out if the timestamp they're started with is already GC-ed, something that's very likely with low GC TTLs. To accommodate, we simply bump the TTL to a more reasonable 100s. Release note: None kvfollowerreadsccl: future-proof TestBoundedStalenessDataDriven This is Yet Another Test that made timing assumptions on how instantaneously range config decisions are applied, assumptions that don't hold under the span configs infrastructure. Adding compatibility is a matter of waiting for the right configs to appear instead of only expecting it. Release note: None changefeedccl: future-proof TestChangefeedBackfillCheckpoint This testing knob was added in #68374 but I'm not sure that it was necessary? Brief stress runs with and without this flag did not surface anything. In a future commit where we enable span configs by default, we'll actually rely on the reconciliation job running, so we get rid of this flag now.
Release note: None liveness: future-proof TestNodeLivenessStatusMap Instead of using hooks that directly mutate the system config span, using SQL statements to tweak zone configs future-proofs this test for compatibility with the span configs infrastructure. Release note: None sql: migrate has_database_privilege from evalPrivilegeCheck to ctx.Planner.HasPrivilege refs https://github.com/cockroachdb/cockroach/issues/66173 HasPrivilege is required to support WITH GRANT OPTION. Release note: None roachtest: fix sequelize nightly Upstream changed how imports are done, so this library had to be updated. Release note: None kvstreamer: fix potential deadlock We have two different locks in the `Streamer` infrastructure: one is for the `Streamer` itself and another one for the `budget`. Previously, there was no contract about which mutex needs to be acquired first, which led the deadlock detector to think that there is a potential deadlock situation. This is now fixed by requiring that the `budget`'s mutex is acquired first and by releasing the `Streamer`'s mutex in `Enqueue` early so that it does not overlap with the interaction with the `budget`. I believe that it was a false positive (i.e. the deadlock cannot actually occur) because without the support of pipelining, `Enqueue` calls and asynchronous request evaluation never overlap in time. Still, it's good to fix the order of mutex acquisitions. Release note: None sql: remove PHYSICAL scrub code The PHYSICAL scrub code is experimental and not considered production-ready. It complicates a lot of code paths involved in normal query execution (it significantly overloads the semantics of TableReader and of the Fetcher) and is getting in the way of some improvements in how the fetchers work. In particular, we are trying to reduce the amount of information passed to TableReader/Fetcher (which in the non-scrubbing case should be a lot less than the full table descriptor). There are some proposals for a better design floating around, e.g. provide a facility for returning KVs as results from DistSQL and have some higher-level code run the scrub checks. This change removes the code for the PHYSICAL scrub for now. Release note (sql change): the experimental SCRUB PHYSICAL is no longer implemented. vars: add placeholder session variable for xmloption Release note (sql change): The `xmloption` session variable is now accepted, only taking in `content`. Note that this does not do anything. clusterversion: improve a version comment Gets rid of a squiggly line in Goland. Release note: None kvserver: plumb a context into (*Store).GetConfReader We'll use it in a future commit. Release note: None spanconfig/reconciler: export the checkpoint timestamp We'll make use of it in a future commit. Release note: None spanconfig: get rid of ReconciliationDependencies interface It was hollow, simply embedding the spanconfig.Reconciler interface. In a future commit we end up relying on each pod's span config reconciler outside of just the reconciliation job. This makes the interface even more awkward than it was. Release note: None Tag test rules that fail with TestNoLinkForbidden ``` --- FAIL: TestNoLinkForbidden (0.01s) build.go:56: cannot find package "github.com/cockroachdb/cockroach/pkg/roachpb" in any of: GOROOT/src/github.com/cockroachdb/cockroach/pkg/roachpb (from $GOROOT) /go/src/github.com/cockroachdb/cockroach/pkg/roachpb (from $GOPATH) ``` This is a workaround for #74176. Release note: None parser: parse CREATE MATERIALIZED VIEW ... 
WITH [NO] DATA WITH NO DATA is currently unimplemented, but we can no-op the WITH DATA. Release note (sql change): `CREATE MATERIALIZED VIEW` syntax now supports `WITH DATA`. dev: only build `:go_path` if you're building a `cockroach` binary Release note: None streamingccl: introduce a builtin crdb_internal.replication_stream_spec Introduce a replication stream builtin crdb_internal.replication_stream_spec which gets a spec for each of the replication stream partitions. Consumers can later use this spec to start a processor on this partition to replicate changes. crdb_internal.replication_stream_spec(stream_id, start_from) stream_id: id of the replication stream. start_from: initial timestamp from which the process starts replicating changes. A follow-up PR will add checkpoint information into the processor spec. Release note (sql change): introduce builtin function for stream replication: crdb_internal.replication_stream_spec. streamingccl: partitionedStreamClient that consumes partitioned replication streams Create a partitionedStreamClient that implements the streamclient.Client interface and talks to the source cluster using the crdb_internal replication stream builtins. Follow-up PRs will make ingestion processors use this client to talk to the source cluster, support backfill checkpoints in the Subscribe API, and add more test coverage. Release note: none cloud: bump orchestrator to v21.2.4 Release note: None roachtest: update 22.1 version map to v21.2.4 Release note: None echotest: add testing helper We have a few tests that record the output of formatting operations. Historically this meant pasting the expected output into source code and updating it whenever necessary. A pattern I've been using over the last year or so is to use datadriven as a convenience wrapper for this, to a) avoid polluting the test code files and b) be able to use the `-rewrite` flag for convenience. Personally I think this deserves its own helper, as this is a pattern worth spreading. I have another use for it in #71806, and there are a few more across the codebase that I plan to update as I run into them. Release note: None kv: circuit-break requests to unavailable replicas Fixes #33007. Closes #61311. This PR uses the circuit breaker package introduced in #73641 to address issue #33007: When a replica becomes unavailable, it should eagerly refuse traffic that it believes would simply hang. Concretely, every request to the Replica gets a cancelable context that is sensitive to the circuit breaker tripping (relying on context cancellation makes sure we don't end up with a second channel that needs to be plumbed to everyone and their dog; Go makes it really difficult to join two cancellation chains); if the breaker is tripped when the request arrives, it is refused right away. 
In either case, the outgoing error is augmented to carry information about the tripped breaker. This isn't 100% trivial, since retreating from in-flight replication typically causes an `AmbiguousResultError`, and while we could avoid it in some cases we can't in all. A similar problem occurs when the canceled request was waiting on a lease, in which case the result is a NotLeaseholderError. For any request that made it in, if it enters replication but does not manage to succeed within the timeout specified by the `kv.replica_circuit_breaker.slow_replication_threshold` cluster setting, the breaker is tripped, cancelling all inflight and future requests until the breaker heals. Perhaps surprisingly, the existing "slowness" detection (the informational "have been waiting ... for proposal" warning in `executeWriteBatch`) was moved deeper into replication (`refreshProposalsLocked`), where it now trips the breaker. This has the advantage of providing a unified path for lease requests (which don't go through `executeWriteBatch`) and pipelined writes (which return before waiting on the inflight replication process). To make this work, we need to pick a little fight with how leases are (not) refreshed (#74711) and we need to remember the ticks at which a proposal was first inserted (despite potential reproposals). Perhaps surprisingly, when the breaker is tripped, *all* traffic to the Replica gets the fail-fast behavior, not just mutations. This is because even though a read may look good to serve based on the lease, we would also need to check for latches, and in particular we would need to fail-fast if there is a transitive dependency on any write (since that write is presumably stuck). This is not trivial and so we don't do it in this first iteration (see #74799). A tripped breaker deploys a background probe which sends a `ProbeRequest` (introduced in #72972) to determine when to heal; this is roughly the case whenever replicating a command through Raft is possible for the Replica, either by appending to the log as the Raft leader, or by forwarding to the Raft leader. A tiny bit of special casing allows requests made by the probe to bypass the breaker. As of this PR, the cluster setting defaults to zero (disabling the entire mechanism) until some necessary follow-up items have been addressed (see #74705). For example, the breaker-sensitive context cancelation is a toy implementation that comes with too much of a performance overhead (one extra goroutine per request); #74707 will address that. Other things not done here that we certainly want in the 22.1 release are UI work (#74713) and metrics (#74505). The release note is deferred to #74705 (where breakers are turned on). Release note: None sql,server: add VIEWACTIVITYREDACTED role This commit adds the new VIEWACTIVITYREDACTED role. This role should act the same as VIEWACTIVITY for the majority of uses, but it restricts the usage of Statement Diagnostics Bundles, which contain PII. It is possible for a user to have both VIEWACTIVITY and VIEWACTIVITYREDACTED, but in this case VIEWACTIVITYREDACTED takes precedence and the user can't use the features restricted by it. Fixes #74716 Release note (sql change): Creation of the new role `VIEWACTIVITYREDACTED` that works similarly to VIEWACTIVITY but restricts the usage of Statement Diagnostics Bundles. It is possible for a user to have both roles (VIEWACTIVITY and VIEWACTIVITYREDACTED), but the role VIEWACTIVITYREDACTED takes precedence for its restrictions. 
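As a rough illustration of how the new role option is meant to be used (a hedged sketch, not part of this patch: it assumes the documented `ALTER USER ... WITH <role option>` syntax, a hypothetical `analyst` user, and a local insecure cluster), granting both options from a Go client could look like this:

```go
// Sketch only: grants VIEWACTIVITY plus the new VIEWACTIVITYREDACTED role
// option to a hypothetical user. The connection string and user name are
// illustrative, not taken from this patch.
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the PostgreSQL wire protocol.
)

func main() {
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	for _, stmt := range []string{
		`CREATE USER IF NOT EXISTS analyst`,
		// VIEWACTIVITY exposes cluster activity (statements, sessions, ...).
		`ALTER USER analyst WITH VIEWACTIVITY`,
		// VIEWACTIVITYREDACTED additionally blocks statement diagnostics
		// bundles; per the commit message it takes precedence when both
		// options are granted.
		`ALTER USER analyst WITH VIEWACTIVITYREDACTED`,
	} {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatalf("%s: %v", stmt, err)
		}
	}
}
```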
authors: add natelong to authors Release note: none streampb: delete `stream.pb.go` This made it in when a commit from December was merged. Release note: None bazel: upgrade `rules_go` to pull in new changes Pull in the following two changes: ``` 23b381cc compilepkg: fix stored file path truncation 6b312978 Revert "Stage Go sources in a directory named after the package." ``` Release note: None colexechash: fix an internal error with distinct mode This commit fixes a bug with the hash table when it is used by the unordered distinct when NULLs are treated as different values. This is the case when UPSERT or INSERT ... ON CONFLICT queries have to perform an `upsert-distinct-on` operation. The problem was that we were updating some internal state (the `GroupID` slice responsible for tracking the current duplicate candidate for each row being probed) in more cases than necessary. The code path in question is used for two purposes: - first, when we're removing the duplicates from within the batch, without looking at the state of the hash table at all. In this case we do want the update mentioned above; - next, when the batch only contains unique rows, we want to remove the duplicates when comparing against the hash table. In this case we do not want the update. The bug is fixed by refactoring the code to not update the internal state at all; instead, we now rely on the `distinct` flag for each row to tell us that the row is distinct within the batch, and we then correctly populate the `HeadID` value for it (which was the ultimate goal all along; previously we used the `GroupID` value as an intermediary). This mistake would not result in incorrect results (because the `distinct` flag is still set correctly) and could only result in an internal error due to an index out of bounds. In particular, for the error to occur the last row in the vectorized batch must have a NULL value in any column (except for the last one) used for the distinctness check. Release note (bug fix): Previously, CockroachDB could encounter an internal error when performing UPSERT or INSERT ... ON CONFLICT queries in some cases when the new rows contained NULL values (either NULLs explicitly specified or NULLs used because some columns were omitted). colexechash: cleanup the previous commit The idea behind this commit is to remove some dead code (and actually get a minor performance improvement) by skipping the check of a conditional that is always true as well as removing another conditional that is always false. We chose to not squash this commit into the previous one because the latter will be backported, and we want to reduce the amount of code changes for the backport. Release note: None colexectestutils: increase test coverage by randomizing batch length We are already randomizing the max batch size used in a run of our unit tests; however, previously it would be fixed for a single run. This commit makes it so that on each batch produced by our test operators we independently decide what length to use - with a 50% chance we keep using the maximum, while in the remaining cases we pick a random length that doesn't exceed the maximum. Release note: None colexechash: remove some dead code This commit takes advantage of the observation that the `probeSel` argument in some templated function calls is never `nil` and removes some code that was present for the case when it is `nil`. Release note: None colexechash: combine two conditionals into one in distinct mode Previously, we had the code of the form ``` if keyID != 0 { ... 
} if keyID == 0 { ... } ``` being generated for the distinct mode. This commit adjusts the template to generate ``` if keyID != 0 { ... } else { ... } ``` instead. Release note: None build: add to nightly and latest tag values Previously, it was unclear if a nightly build was generated from a release branch or a custom branch. This commit clarifies that by incorporating `` into both the nightly tag and latest tag values. Specifically: - Nightly tag - Before: `v21.1.12-110-g697d71136a` - After : `release-21.1-v21.1.12-110-g697d71136a` - Latest tag - Before: `latest-v21.1-build` - After : `latest-release-21.1-build` - Latest qualified tag - Before: `latest-v21.1-qualified-build` - After : `latest-release-21.1-qualified-build` Release note: None cdc: Allow webhook sink to provide client certificates to the remote webhook server Release note (enterprise change): Client certificates may now be provided for the webhook changefeed sink. sql: add regression tests inserting decimals in scientific notation Implementation of assignment casts fixed incorrect behavior when inserting literal values given in scientific notation into `DECIMAL` columns. This commit adds regression tests for this issue. Informs #59489 Release note (bug fix): Previously, the scale of a `DECIMAL` column was not enforced when values given in scientific notation (e.g., `6e3`) were inserted into the column. This bug has been fixed. tree,parser: add support for ON CONFLICT ON CONSTRAINT This commit adds support to the parser for the ON CONFLICT ON CONSTRAINT form of INSERT ON CONFLICT. Release note: None sql: implement ON CONFLICT ON CONSTRAINT This commit adds support for the ON CONFLICT ON CONSTRAINT form of INSERT ON CONFLICT, which allows users to explicitly specify "arbiter indexes" for the conflict detection and resolution rather than using the ON CONFLICT (columns...) form, which infers the arbiter indexes instead. Release note (sql change): add support for the ON CONFLICT ON CONSTRAINT form of INSERT ON CONFLICT. This form is added for compatibility with PostgreSQL. It permits explicitly selecting an arbiter index for INSERT ON CONFLICT, rather than inferring one using a column list, which is the default behavior. sql: remove unused scanColumnsConfig field Release note: None sql: don't check column visibility when initializing scanNode This commit removes a left-over piece of code which verifies the visibility of scanNode columns. Execution is not the place to do this; these semantics are implemented in the optbuilder. Release note: None sql: simplify scanColumnsConfig This change removes two fields from `scanColumnsConfig`: - `addUnwantedAsHidden`: the last use of this was in the physical scrub, which has recently been... scrubbed. - `visibility`: this was always set to `ScanVisibilityPublicAndNotPublic`. We also simplify various code that checked the visibility. Release note: None sql: remove index flags logic from scanNode This commit removes leftover logic related to index hints, which is now defunct. Release note: None sql: remove privilege checks at scanNode init time This commit removes privilege checks when initializing a scanNode or a zig-zag join. This is no longer the business of this code (all checks happen in optbuilder). Release note: None sql: clean up unnamed struct in scanColumnsConfig This commit replaces an unnamed struct with two fields. Release note: None execinfrapb: remove ScanVisibility This commit removes the `ScanVisibility` type and all `Visibility` fields from processor specs. 
The planner now always expects that the "internal schema" contains both public and non-public columns. Release note: None deps: bump apache/thrift Release note: None streamingccl/streampb: ensure there's at least one Go source file When the directory is empty (prior to deriving .pb.go from .proto), the Go compiler has no package name for that directory. This prevents the execution of `go mod tidy` and `make vendor_rebuild`, and even prevents the generation of the `.pb.go` file, resulting in a catch-22. Release note: None jobs,backup,vendor: switch from gorhill/cronexpr to robfig/cron gorhill/cronexpr is abandoned, and we recently found a panic that was crashing nodes, which upstream considered known behavior when an invalid expression was provided. While robfig/cron is a full scheduled job execution framework, we can use its parser separately as well, which is what this change does. Release note (bug fix): Certain malformed backup schedule expressions no longer cause the node to crash. Release note (backward-incompatible change): Non-standard cron expressions that specify seconds or year fields are no longer supported. backup: use correct Context in restore workers Previously, some of the workers, which are called by ctxgroup goroutines, were using RestoreDataProcessor.Ctx instead of the child context that the group created, which, critically, is cancelled if any group task fails. This could mean that one worker in the group fails, stops draining a channel, and returns an error to the group, which cancels its context; another worker trying to write to that channel then hangs if it is not checking the passed, now-cancelled context. Release note (bug fix): fix a case where a RESTORE job could hang if it encountered an error when ingesting restored data. 
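To make the context-plumbing hazard concrete, here is a minimal sketch of the pattern described above, written against the standard `errgroup` package rather than CockroachDB's `ctxgroup` wrapper; the worker bodies and the channel are invented for illustration and are not taken from this patch:

```go
// Sketch only: reproduces the shape of the bug fixed above using
// golang.org/x/sync/errgroup. The names (run, parentCtx, ch) are illustrative.
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func run(parentCtx context.Context) error {
	g, ctx := errgroup.WithContext(parentCtx)
	ch := make(chan int) // unbuffered, so sends block until drained

	// "Consumer" worker: fails early, stops draining ch, and returns an
	// error to the group, which cancels ctx (the group's child context).
	g.Go(func() error {
		return errors.New("ingestion error")
	})

	// "Producer" worker: if it watched parentCtx here instead of ctx, it
	// would never observe the group's cancellation and would block forever
	// on the channel send - the hang described in the commit message.
	g.Go(func() error {
		select {
		case ch <- 42:
			return nil
		case <-ctx.Done(): // the fix: watch the group's child context
			return ctx.Err()
		}
	})

	return g.Wait()
}

func main() {
	fmt.Println(run(context.Background())) // prints: ingestion error
}
```

Watching the group's child context lets the producer bail out as soon as any sibling worker fails, instead of hanging on a channel that nobody will drain again.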
--- .bazelrc | 2 + .gitignore | 1 - AUTHORS | 5 + BUILD.bazel | 6 +- DEPS.bzl | 118 +- Makefile | 2 +- WORKSPACE | 8 +- bazel-out | 3 + build/bazelutil/check.sh | 4 +- .../teamcity-make-and-publish-build.sh | 5 +- build/release/teamcity-mark-build.sh | 4 +- .../nightlies/pebble_nightly_common.sh} | 33 +- .../pebble_nightly_write_throughput.sh | 16 + .../pebble_nightly_write_throughput_impl.sh} | 9 +- .../nightlies/pebble_nightly_ycsb.sh | 16 + .../nightlies/pebble_nightly_ycsb_impl.sh} | 8 +- .../bring-your-own-certs/client.yaml | 2 +- .../cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/client-secure.yaml | 2 +- cloud/kubernetes/cluster-init-secure.yaml | 2 +- cloud/kubernetes/cluster-init.yaml | 2 +- .../cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/cockroachdb-statefulset.yaml | 2 +- .../kubernetes/multiregion/client-secure.yaml | 2 +- .../multiregion/cluster-init-secure.yaml | 2 +- .../cockroachdb-statefulset-secure.yaml | 2 +- .../cockroachdb-statefulset-secure-eks.yaml | 2 +- .../cockroachdb-daemonset-insecure.yaml | 2 +- .../cockroachdb-daemonset-secure.yaml | 2 +- .../cockroachdb-statefulset-insecure.yaml | 2 +- .../cockroachdb-statefulset-secure.yaml | 2 +- cloud/kubernetes/v1.6/client-secure.yaml | 2 +- .../kubernetes/v1.6/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.6/cluster-init.yaml | 2 +- .../v1.6/cockroachdb-statefulset-secure.yaml | 2 +- .../v1.6/cockroachdb-statefulset.yaml | 2 +- cloud/kubernetes/v1.7/client-secure.yaml | 2 +- .../kubernetes/v1.7/cluster-init-secure.yaml | 2 +- cloud/kubernetes/v1.7/cluster-init.yaml | 2 +- .../v1.7/cockroachdb-statefulset-secure.yaml | 2 +- .../v1.7/cockroachdb-statefulset.yaml | 2 +- dev | 8 +- docs/RFCS/20211203_session_revival_token.md | 168 + docs/generated/http/BUILD.bazel | 1 + docs/generated/redact_safe.md | 11 +- .../settings/settings-for-tenants.txt | 2 + docs/generated/settings/settings.html | 3 + docs/generated/sql/bnf/create_view_stmt.bnf | 8 +- docs/generated/sql/bnf/on_conflict.bnf | 2 + docs/generated/sql/bnf/stmt_block.bnf | 14 +- docs/generated/sql/functions.md | 10 +- go.mod | 31 +- go.sum | 64 +- pkg/BUILD.bazel | 10 +- pkg/bench/bench_test.go | 8 +- pkg/ccl/backupccl/BUILD.bazel | 9 +- pkg/ccl/backupccl/backup_destination.go | 56 +- pkg/ccl/backupccl/backup_job.go | 83 +- pkg/ccl/backupccl/backup_planning.go | 16 +- pkg/ccl/backupccl/backup_test.go | 19 +- pkg/ccl/backupccl/bench_covering_test.go | 48 +- pkg/ccl/backupccl/create_scheduled_backup.go | 10 +- .../backupccl/create_scheduled_backup_test.go | 6 +- pkg/ccl/backupccl/helpers_test.go | 4 +- pkg/ccl/backupccl/import_spans_test.go | 299 - pkg/ccl/backupccl/key_rewriter_test.go | 8 +- pkg/ccl/backupccl/manifest_handling.go | 192 +- pkg/ccl/backupccl/restore_data_processor.go | 44 +- .../backupccl/restore_data_processor_test.go | 4 +- pkg/ccl/backupccl/restore_job.go | 45 +- pkg/ccl/backupccl/restore_planning.go | 25 +- pkg/ccl/backupccl/restore_span_covering.go | 235 +- .../backupccl/restore_span_covering_test.go | 12 + pkg/ccl/backupccl/show.go | 18 +- pkg/ccl/backupccl/show_test.go | 4 +- .../backupccl/split_and_scatter_processor.go | 2 +- pkg/ccl/backupccl/targets.go | 14 +- pkg/ccl/changefeedccl/BUILD.bazel | 5 +- pkg/ccl/changefeedccl/avro.go | 16 +- pkg/ccl/changefeedccl/avro_test.go | 2 +- .../cdctest/mock_webhook_sink.go | 17 + pkg/ccl/changefeedccl/cdctest/tls_util.go | 50 + .../changefeedccl/changefeed_processors.go | 2 +- pkg/ccl/changefeedccl/changefeed_test.go | 14 +- pkg/ccl/changefeedccl/encoder.go | 2 +- 
pkg/ccl/changefeedccl/helpers_test.go | 13 +- pkg/ccl/changefeedccl/kvevent/BUILD.bazel | 2 +- .../kvevent/blocking_buffer_test.go | 4 +- pkg/ccl/changefeedccl/kvfeed/BUILD.bazel | 2 +- pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go | 4 +- pkg/ccl/changefeedccl/rowfetcher_cache.go | 5 +- .../schemafeed/schematestutils/BUILD.bazel | 1 + .../schematestutils/schema_test_utils.go | 7 +- pkg/ccl/changefeedccl/sink.go | 2 +- pkg/ccl/changefeedccl/sink_webhook.go | 22 + pkg/ccl/changefeedccl/sink_webhook_test.go | 28 + pkg/ccl/changefeedccl/testfeed_test.go | 40 +- pkg/ccl/cliccl/BUILD.bazel | 3 +- pkg/ccl/cliccl/debug_backup.go | 12 +- pkg/ccl/importccl/BUILD.bazel | 3 +- pkg/ccl/importccl/exportcsv.go | 2 +- pkg/ccl/importccl/exportcsv_test.go | 107 +- pkg/ccl/importccl/exportparquet.go | 64 +- pkg/ccl/importccl/import_planning.go | 3 +- pkg/ccl/importccl/import_processor.go | 19 + pkg/ccl/importccl/import_stmt_test.go | 20 + pkg/ccl/importccl/import_table_creation.go | 9 +- pkg/ccl/importccl/read_import_base.go | 19 +- pkg/ccl/importccl/read_import_pgdump.go | 20 +- pkg/ccl/importccl/read_import_workload.go | 6 +- .../testdata/boundedstaleness/single_row | 2 +- pkg/ccl/kvccl/kvtenantccl/connector_test.go | 6 + .../testdata/logic_test/alter_table_locality | 2 +- .../logictestccl/testdata/logic_test/as_of | 25 +- .../testdata/logic_test/multi_region | 4 +- .../logic_test/multi_region_zone_configs | 2 +- .../testdata/logic_test/regional_by_row | 4 +- .../logic_test/zone_config_secondary_tenants | 5 +- .../seed_tenant_span_configs_external_test.go | 36 +- pkg/ccl/multiregionccl/BUILD.bazel | 2 +- pkg/ccl/multiregionccl/datadriven_test.go | 2 +- pkg/ccl/multiregionccl/multiregion.go | 14 +- pkg/ccl/multiregionccl/roundtrips_test.go | 32 +- pkg/ccl/partitionccl/BUILD.bazel | 2 +- pkg/ccl/partitionccl/partition.go | 9 +- pkg/ccl/partitionccl/partition_test.go | 3 +- pkg/ccl/serverccl/BUILD.bazel | 1 - pkg/ccl/serverccl/role_authentication_test.go | 5 +- pkg/ccl/serverccl/server_sql_test.go | 6 +- pkg/ccl/serverccl/statusccl/BUILD.bazel | 1 + .../serverccl/statusccl/tenant_status_test.go | 122 +- .../spanconfigcomparedccl/datadriven_test.go | 8 +- .../datadriven_test.go | 8 +- .../datadriven_test.go | 4 +- .../testdata/tenant/misc | 26 +- pkg/ccl/sqlproxyccl/proxy_handler_test.go | 31 +- pkg/ccl/streamingccl/streamclient/BUILD.bazel | 11 + pkg/ccl/streamingccl/streamclient/client.go | 49 +- .../streamingccl/streamclient/client_test.go | 52 +- .../cockroach_sinkless_replication_client.go | 95 +- ...kroach_sinkless_replication_client_test.go | 31 +- .../streamclient/partitioned_stream_client.go | 269 + .../partitioned_stream_client_test.go | 166 + .../streamclient/random_stream_client.go | 68 +- pkg/ccl/streamingccl/streamingest/BUILD.bazel | 2 + .../stream_ingestion_frontier_processor.go | 2 +- ...tream_ingestion_frontier_processor_test.go | 3 + .../stream_ingestion_processor.go | 34 +- .../stream_ingestion_processor_test.go | 52 +- .../streamingest/stream_ingestion_test.go | 3 + pkg/ccl/streamingccl/streampb/BUILD.bazel | 3 + pkg/ccl/streamingccl/streampb/empty.go | 11 + pkg/ccl/streamingccl/streampb/stream.proto | 42 + .../streamingccl/streamproducer/BUILD.bazel | 1 + .../streamproducer/producer_job_test.go | 9 +- .../streamproducer/replication_manager.go | 11 +- .../replication_stream_planning.go | 54 + .../streamproducer/replication_stream_test.go | 36 +- .../streamproducer/stream_lifetime.go | 21 +- pkg/cli/BUILD.bazel | 2 +- pkg/cli/cert.go | 1 + pkg/cli/debug_recover_loss_of_quorum.go | 5 +- 
pkg/cli/democluster/demo_cluster.go | 5 + pkg/cli/doctor.go | 2 +- pkg/cli/gen.go | 4 +- pkg/cli/mt.go | 1 + pkg/cli/mt_cert.go | 30 + pkg/clusterversion/cockroach_versions.go | 2 +- pkg/cmd/cmp-sql/BUILD.bazel | 2 +- pkg/cmd/cmp-sql/main.go | 2 +- pkg/cmd/cmpconn/BUILD.bazel | 4 +- pkg/cmd/cmpconn/compare.go | 6 +- pkg/cmd/cmpconn/compare_test.go | 2 +- pkg/cmd/dev/bench.go | 53 +- pkg/cmd/dev/build.go | 116 +- pkg/cmd/dev/test.go | 18 +- pkg/cmd/dev/testdata/build.txt | 2 +- pkg/cmd/dev/testdata/generate.txt | 2 +- pkg/cmd/dev/testdata/recording/build.txt | 110 +- pkg/cmd/dev/testdata/recording/generate.txt | 110 +- pkg/cmd/dev/util.go | 8 + pkg/cmd/roachtest/tests/pgjdbc_blocklist.go | 200 +- .../roachtest/tests/predecessor_version.go | 2 +- pkg/cmd/roachtest/tests/sequelize.go | 2 +- pkg/cmd/roachtest/tests/tlp.go | 2 +- pkg/cmd/teamcity-trigger/main.go | 6 +- pkg/col/coldata/BUILD.bazel | 3 +- pkg/col/coldata/native_types.go | 4 +- pkg/col/coldata/nulls.go | 4 +- pkg/col/coldata/nulls_test.go | 8 +- pkg/col/coldata/vec.eg.go | 2 +- pkg/col/coldata/vec.go | 6 +- pkg/col/coldata/vec_tmpl.go | 2 +- pkg/col/coldataext/BUILD.bazel | 3 +- pkg/col/coldataext/datum_vec.go | 13 +- pkg/col/colserde/BUILD.bazel | 6 +- pkg/col/colserde/record_batch_test.go | 8 +- pkg/col/typeconv/BUILD.bazel | 2 +- pkg/col/typeconv/typeconv.go | 2 +- pkg/jobs/BUILD.bazel | 7 +- pkg/jobs/executor_impl_test.go | 3 +- pkg/jobs/job_scheduler_test.go | 22 +- pkg/jobs/jobs.go | 15 +- pkg/jobs/jobs_test.go | 16 +- pkg/jobs/jobspb/BUILD.bazel | 1 + pkg/jobs/jobspb/jobs.proto | 39 +- pkg/jobs/scheduled_job.go | 7 +- pkg/keys/BUILD.bazel | 10 +- pkg/keys/commenttype_string.go | 28 + pkg/keys/constants.go | 27 +- pkg/keys/printer_test.go | 2 +- pkg/kv/BUILD.bazel | 2 +- pkg/kv/bulk/buffering_adder.go | 1 + pkg/kv/bulk/sst_batcher.go | 15 +- pkg/kv/bulk/sst_batcher_test.go | 9 +- pkg/kv/kvclient/kvcoord/BUILD.bazel | 1 + pkg/kv/kvclient/kvcoord/batch.go | 12 +- pkg/kv/kvclient/kvcoord/batch_test.go | 2 +- pkg/kv/kvclient/kvcoord/dist_sender.go | 4 +- pkg/kv/kvclient/kvcoord/send_test.go | 6 + pkg/kv/kvclient/kvcoord/transport_test.go | 6 + pkg/kv/kvclient/kvcoord/truncate_test.go | 2 +- pkg/kv/kvclient/kvstreamer/BUILD.bazel | 56 + .../kvstreamer/avg_response_estimator.go | 42 + .../kvstreamer/avg_response_estimator_test.go | 56 + pkg/kv/kvclient/kvstreamer/budget.go | 126 + pkg/kv/kvclient/kvstreamer/main_test.go | 31 + pkg/kv/kvclient/kvstreamer/streamer.go | 1175 ++ pkg/kv/kvclient/kvstreamer/streamer_test.go | 271 + pkg/kv/kvclient/rangecache/range_cache.go | 2 +- pkg/kv/kvserver/BUILD.bazel | 8 +- pkg/kv/kvserver/batcheval/cmd_clear_range.go | 13 +- .../batcheval/cmd_clear_range_test.go | 19 +- pkg/kv/kvserver/client_raft_test.go | 22 +- .../client_replica_circuit_breaker_test.go | 588 + pkg/kv/kvserver/client_replica_test.go | 4 +- pkg/kv/kvserver/client_spanconfigs_test.go | 2 +- pkg/kv/kvserver/client_split_test.go | 4 +- pkg/kv/kvserver/closed_timestamp_test.go | 6 +- pkg/kv/kvserver/helpers_test.go | 5 + pkg/kv/kvserver/kvserverbase/bulk_adder.go | 4 + pkg/kv/kvserver/kvserverpb/state.proto | 3 + pkg/kv/kvserver/liveness/BUILD.bazel | 4 - pkg/kv/kvserver/liveness/client_test.go | 15 +- pkg/kv/kvserver/loqrecovery/BUILD.bazel | 1 + pkg/kv/kvserver/loqrecovery/apply.go | 4 +- .../loqrecovery/loqrecoverypb/recovery.go | 12 + .../loqrecovery/loqrecoverypb/recovery.proto | 3 +- pkg/kv/kvserver/loqrecovery/plan.go | 514 +- .../kvserver/loqrecovery/recovery_env_test.go | 61 +- .../loqrecovery/testdata/invalid_input | 23 + 
.../loqrecovery/testdata/keyspace_coverage | 194 + .../loqrecovery/testdata/learners_lose | 47 +- .../testdata/max_applied_voter_wins | 221 + .../loqrecovery/testdata/max_store_voter_wins | 163 +- pkg/kv/kvserver/loqrecovery/utils.go | 68 +- pkg/kv/kvserver/mvcc_gc_queue_test.go | 6 +- pkg/kv/kvserver/queue.go | 4 +- pkg/kv/kvserver/queue_helpers_testutil.go | 16 +- pkg/kv/kvserver/queue_test.go | 2 +- pkg/kv/kvserver/replica.go | 109 +- pkg/kv/kvserver/replica_circuit_breaker.go | 229 + .../kvserver/replica_circuit_breaker_test.go | 42 + pkg/kv/kvserver/replica_closedts_test.go | 2 +- pkg/kv/kvserver/replica_command.go | 2 +- pkg/kv/kvserver/replica_init.go | 2 + pkg/kv/kvserver/replica_proposal.go | 4 + pkg/kv/kvserver/replica_proposal_buf.go | 3 + pkg/kv/kvserver/replica_raft.go | 83 +- pkg/kv/kvserver/replica_raft_quiesce.go | 16 +- pkg/kv/kvserver/replica_raftstorage.go | 2 +- pkg/kv/kvserver/replica_range_lease.go | 11 +- pkg/kv/kvserver/replica_rangefeed.go | 2 +- pkg/kv/kvserver/replica_send.go | 82 +- pkg/kv/kvserver/replica_test.go | 47 +- pkg/kv/kvserver/replica_write.go | 80 - pkg/kv/kvserver/store.go | 17 +- pkg/kv/kvserver/store_raft.go | 2 +- pkg/kv/kvserver/store_rebalancer.go | 24 +- pkg/kv/kvserver/store_rebalancer_test.go | 62 +- .../testdata/replica_unavailable_error.txt | 3 + pkg/kv/kvserver/testing_knobs.go | 4 + pkg/kv/util.go | 2 +- pkg/migration/migrations/BUILD.bazel | 11 + pkg/migration/migrations/migrations.go | 7 +- .../migrations/public_schema_migration.go | 225 + .../public_schema_migration_external_test.go | 214 + pkg/roachpb/BUILD.bazel | 7 +- pkg/roachpb/api.go | 16 + pkg/roachpb/api.proto | 58 + pkg/roachpb/data.go | 8 +- pkg/roachpb/data_test.go | 2 +- pkg/roachpb/errors.go | 22 +- pkg/roachpb/errors.proto | 7 + pkg/roachpb/mocks_generated.go | 20 + pkg/roachpb/string_test.go | 36 - pkg/rpc/context.go | 46 + pkg/rpc/context_test.go | 6 + pkg/rpc/nodedialer/nodedialer_test.go | 6 + pkg/security/certificate_loader.go | 9 + pkg/security/certificate_manager.go | 48 +- pkg/security/certs.go | 53 + pkg/security/certs_tenant_test.go | 17 + pkg/security/certs_test.go | 15 +- pkg/security/password.go | 40 +- pkg/security/pem.go | 9 +- .../securitytest/test_certs/README.md | 9 +- .../test_certs/ca-client-tenant.crt | 34 +- .../test_certs/ca-client-tenant.key | 50 +- pkg/security/securitytest/test_certs/ca.crt | 34 +- pkg/security/securitytest/test_certs/ca.key | 50 +- .../test_certs/client-tenant.10.crt | 30 +- .../test_certs/client-tenant.10.key | 50 +- .../test_certs/client-tenant.11.crt | 38 +- .../test_certs/client-tenant.11.key | 50 +- .../test_certs/client-tenant.20.crt | 30 +- .../test_certs/client-tenant.20.key | 50 +- .../securitytest/test_certs/client.root.crt | 30 +- .../securitytest/test_certs/client.root.key | 50 +- .../test_certs/client.testuser.crt | 34 +- .../test_certs/client.testuser.key | 50 +- .../test_certs/client.testuser2.crt | 34 +- .../test_certs/client.testuser2.key | 50 +- pkg/security/securitytest/test_certs/node.crt | 34 +- pkg/security/securitytest/test_certs/node.key | 50 +- .../securitytest/test_certs/regenerate.sh | 1 + .../test_certs/tenant-signing.10.crt | 9 + .../test_certs/tenant-signing.10.key | 3 + .../test_certs/tenant-signing.11.crt | 9 + .../test_certs/tenant-signing.11.key | 3 + .../test_certs/tenant-signing.20.crt | 9 + .../test_certs/tenant-signing.20.key | 3 + pkg/security/x509.go | 32 + pkg/server/BUILD.bazel | 3 +- pkg/server/admin.go | 73 +- pkg/server/admin_cluster_test.go | 9 +- pkg/server/admin_test.go | 4 +- 
pkg/server/authentication_test.go | 3 +- pkg/server/combined_statement_stats.go | 12 +- pkg/server/diagnostics/BUILD.bazel | 1 + pkg/server/diagnostics/reporter.go | 4 +- pkg/server/index_usage_stats.go | 12 +- pkg/server/node.go | 7 + pkg/server/server_sql.go | 7 +- pkg/server/settingswatcher/BUILD.bazel | 4 +- pkg/server/settingswatcher/row_decoder.go | 77 +- .../settingswatcher/settings_watcher.go | 27 +- .../settings_watcher_external_test.go | 25 +- pkg/server/statement_diagnostics_requests.go | 8 +- pkg/server/statements.go | 2 +- pkg/server/status.go | 39 +- pkg/server/status_test.go | 135 +- pkg/server/tenant_status.go | 12 +- pkg/settings/BUILD.bazel | 2 + pkg/settings/bool.go | 8 +- pkg/settings/common.go | 87 +- pkg/settings/duration.go | 10 +- pkg/settings/float.go | 8 +- pkg/settings/int.go | 10 +- pkg/settings/registry.go | 43 +- pkg/settings/setting.go | 12 +- pkg/settings/settings_test.go | 94 +- pkg/settings/string.go | 4 +- pkg/settings/updater.go | 4 + pkg/settings/values.go | 132 +- pkg/settings/version.go | 6 +- pkg/spanconfig/spanconfig.go | 21 +- pkg/spanconfig/spanconfigjob/job.go | 6 +- .../spanconfigkvsubscriber/BUILD.bazel | 4 +- .../span_config_decoder.go | 66 +- pkg/spanconfig/spanconfigmanager/manager.go | 16 +- .../spanconfigmanager/manager_test.go | 6 +- .../spanconfigreconciler/BUILD.bazel | 1 + .../spanconfigreconciler/reconciler.go | 33 +- .../spanconfigsqlwatcher/zonesdecoder.go | 2 +- pkg/spanconfig/spanconfigstore/store.go | 2 +- .../spanconfigtestcluster/cluster.go | 25 +- .../spanconfigtestcluster/tenant_state.go | 10 +- pkg/sql/BUILD.bazel | 12 +- pkg/sql/alter_database.go | 17 +- pkg/sql/alter_primary_key.go | 31 +- pkg/sql/alter_table.go | 5 +- pkg/sql/alter_table_locality.go | 15 +- pkg/sql/as_of_test.go | 2 +- pkg/sql/authorization.go | 24 + pkg/sql/backfill.go | 55 +- pkg/sql/backfill/backfill.go | 18 +- pkg/sql/backfill_test.go | 184 + pkg/sql/buffer_util.go | 2 +- pkg/sql/catalog/BUILD.bazel | 2 + pkg/sql/catalog/catformat/BUILD.bazel | 1 + pkg/sql/catalog/catformat/index_test.go | 3 +- pkg/sql/catalog/catpb/BUILD.bazel | 37 + pkg/sql/catalog/catpb/catalog.proto | 125 + pkg/sql/catalog/catpb/constraint.go | 38 + pkg/sql/catalog/catpb/doc.go | 13 + .../catalog/{descpb => catpb}/multiregion.go | 15 +- pkg/sql/catalog/colinfo/BUILD.bazel | 1 + pkg/sql/catalog/colinfo/col_type_info.go | 47 - pkg/sql/catalog/colinfo/system_columns.go | 11 +- pkg/sql/catalog/dbdesc/BUILD.bazel | 1 + pkg/sql/catalog/dbdesc/database_desc.go | 3 +- pkg/sql/catalog/descpb/BUILD.bazel | 5 +- pkg/sql/catalog/descpb/constraint.go | 41 +- pkg/sql/catalog/descpb/structured.go | 21 +- pkg/sql/catalog/descpb/structured.proto | 128 +- pkg/sql/catalog/descriptor.go | 41 +- .../catalog/descs/dist_sql_type_resolver.go | 30 - pkg/sql/catalog/descs/leased_descriptors.go | 20 +- pkg/sql/catalog/descs/table.go | 20 + pkg/sql/catalog/multiregion/BUILD.bazel | 2 + pkg/sql/catalog/multiregion/region_config.go | 19 +- .../catalog/multiregion/region_config_test.go | 9 +- pkg/sql/catalog/multiregion/validate_table.go | 22 +- pkg/sql/catalog/systemschema/BUILD.bazel | 1 + pkg/sql/catalog/systemschema/system.go | 15 +- pkg/sql/catalog/table_elements.go | 31 +- pkg/sql/catalog/tabledesc/BUILD.bazel | 3 +- pkg/sql/catalog/tabledesc/column.go | 74 +- pkg/sql/catalog/tabledesc/index.go | 3 +- pkg/sql/catalog/tabledesc/index_test.go | 3 +- pkg/sql/catalog/tabledesc/safe_format_test.go | 10 +- pkg/sql/catalog/tabledesc/structured.go | 38 +- pkg/sql/catalog/tabledesc/table.go | 9 +- 
pkg/sql/catalog/tabledesc/table_desc.go | 73 + pkg/sql/catalog/tabledesc/validate.go | 9 +- pkg/sql/catalog/tabledesc/validate_test.go | 23 +- pkg/sql/catalog/typedesc/BUILD.bazel | 1 + .../typedesc/table_implicit_record_type.go | 11 +- pkg/sql/catalog/typedesc/type_desc.go | 29 +- pkg/sql/colconv/vec_to_datum.eg.go | 32 +- pkg/sql/colconv/vec_to_datum_tmpl.go | 9 +- pkg/sql/colencoding/BUILD.bazel | 8 +- pkg/sql/colencoding/key_encoding.go | 21 +- pkg/sql/colencoding/value_encoding.go | 6 +- pkg/sql/colencoding/value_encoding_test.go | 7 +- pkg/sql/colexec/BUILD.bazel | 6 +- pkg/sql/colexec/aggregators_test.go | 2 +- pkg/sql/colexec/aggregators_util.go | 13 +- pkg/sql/colexec/case_test.go | 2 +- pkg/sql/colexec/colbuilder/execplan.go | 44 +- pkg/sql/colexec/colexecagg/BUILD.bazel | 3 +- pkg/sql/colexec/colexecagg/aggregate_funcs.go | 2 +- .../colexecagg/any_not_null_agg_tmpl.go | 2 +- pkg/sql/colexec/colexecagg/avg_agg_tmpl.go | 25 +- .../colexecagg/hash_any_not_null_agg.eg.go | 6 +- pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go | 61 +- .../colexec/colexecagg/hash_min_max_agg.eg.go | 10 +- pkg/sql/colexec/colexecagg/hash_sum_agg.eg.go | 61 +- .../colexec/colexecagg/hash_sum_int_agg.eg.go | 2 +- .../colexec/colexecagg/min_max_agg_tmpl.go | 2 +- .../colexecagg/ordered_any_not_null_agg.eg.go | 6 +- .../colexec/colexecagg/ordered_avg_agg.eg.go | 85 +- .../colexecagg/ordered_min_max_agg.eg.go | 10 +- .../colexec/colexecagg/ordered_sum_agg.eg.go | 85 +- .../colexecagg/ordered_sum_int_agg.eg.go | 2 +- pkg/sql/colexec/colexecagg/sum_agg_tmpl.go | 27 +- .../colexec/colexecagg/window_avg_agg.eg.go | 110 +- .../colexecagg/window_min_max_agg.eg.go | 10 +- .../colexec/colexecagg/window_sum_agg.eg.go | 110 +- .../colexecagg/window_sum_int_agg.eg.go | 2 +- pkg/sql/colexec/colexecargs/BUILD.bazel | 1 + pkg/sql/colexec/colexecbase/BUILD.bazel | 3 +- pkg/sql/colexec/colexecbase/cast.eg.go | 69 +- pkg/sql/colexec/colexecbase/cast_tmpl.go | 19 +- pkg/sql/colexec/colexecbase/const.eg.go | 2 +- pkg/sql/colexec/colexecbase/const_tmpl.go | 2 +- pkg/sql/colexec/colexecbase/distinct.eg.go | 2 +- pkg/sql/colexec/colexeccmp/BUILD.bazel | 1 + pkg/sql/colexec/colexechash/BUILD.bazel | 6 +- pkg/sql/colexec/colexechash/hash_utils.eg.go | 135 +- pkg/sql/colexec/colexechash/hash_utils.go | 10 +- .../colexec/colexechash/hash_utils_test.go | 10 +- .../colexec/colexechash/hash_utils_tmpl.go | 24 +- pkg/sql/colexec/colexechash/hashtable.go | 11 +- .../colexechash/hashtable_distinct.eg.go | 15199 ++++++---------- pkg/sql/colexec/colexechash/hashtable_tmpl.go | 129 +- pkg/sql/colexec/colexecjoin/BUILD.bazel | 3 +- .../colexecjoin/mergejoiner_exceptall.eg.go | 2 +- .../colexecjoin/mergejoiner_fullouter.eg.go | 2 +- .../colexecjoin/mergejoiner_inner.eg.go | 2 +- .../mergejoiner_intersectall.eg.go | 2 +- .../colexecjoin/mergejoiner_leftanti.eg.go | 2 +- .../colexecjoin/mergejoiner_leftouter.eg.go | 2 +- .../colexecjoin/mergejoiner_leftsemi.eg.go | 2 +- .../colexecjoin/mergejoiner_rightanti.eg.go | 2 +- .../colexecjoin/mergejoiner_rightouter.eg.go | 2 +- .../colexecjoin/mergejoiner_rightsemi.eg.go | 2 +- .../colexec/colexecjoin/mergejoiner_tmpl.go | 2 +- pkg/sql/colexec/colexecproj/BUILD.bazel | 4 +- .../colexecproj/proj_const_left_ops.eg.go | 4069 ++--- .../colexecproj/proj_const_ops_tmpl.go | 29 +- .../colexecproj/proj_const_right_ops.eg.go | 6325 +++---- .../colexec/colexecproj/proj_like_ops.eg.go | 64 +- .../colexecproj/proj_non_const_ops.eg.go | 5889 ++---- .../colexecproj/proj_non_const_ops_tmpl.go | 42 +- 
.../colexecproj/projection_ops_test.go | 3 +- pkg/sql/colexec/colexecsel/BUILD.bazel | 3 +- pkg/sql/colexec/colexecsel/sel_like_ops.eg.go | 48 - .../colexec/colexecsel/selection_ops.eg.go | 4151 +---- .../colexec/colexecsel/selection_ops_tmpl.go | 25 +- pkg/sql/colexec/colexecspan/BUILD.bazel | 4 +- .../colexec/colexecspan/span_assembler.eg.go | 2 +- .../colexecspan/span_assembler_tmpl.go | 2 +- .../colexec/colexecspan/span_encoder.eg.go | 20 +- .../colexec/colexecspan/span_encoder_tmpl.go | 4 +- pkg/sql/colexec/colexectestutils/BUILD.bazel | 6 +- pkg/sql/colexec/colexectestutils/utils.go | 37 +- pkg/sql/colexec/colexecutils/BUILD.bazel | 1 + pkg/sql/colexec/colexecwindow/BUILD.bazel | 5 +- .../colexecwindow/min_max_removable_agg.eg.go | 2 +- .../min_max_removable_agg_tmpl.go | 2 +- .../colexecwindow/range_offset_handler.eg.go | 630 +- .../range_offset_handler_tmpl.go | 31 +- .../colexec/colexecwindow/window_framer.eg.go | 5 +- .../colexecwindow/window_framer_tmpl.go | 5 +- .../colexecwindow/window_functions_test.go | 2 +- pkg/sql/colexec/columnarizer.go | 3 +- pkg/sql/colexec/distinct_test.go | 22 + pkg/sql/colexec/execgen/BUILD.bazel | 3 +- .../execgen/cmd/execgen/avg_agg_gen.go | 4 - .../execgen/cmd/execgen/cast_gen_util.go | 4 +- .../execgen/cmd/execgen/distinct_gen.go | 2 +- .../execgen/cmd/execgen/hash_utils_gen.go | 2 +- .../execgen/cmd/execgen/hashtable_gen.go | 19 +- .../execgen/cmd/execgen/overloads_base.go | 24 +- .../execgen/cmd/execgen/overloads_bin.go | 16 +- .../execgen/cmd/execgen/overloads_cmp.go | 16 +- .../execgen/cmd/execgen/overloads_hash.go | 61 +- .../execgen/cmd/execgen/span_encoder_gen.go | 2 +- .../execgen/cmd/execgen/sum_agg_gen.go | 6 - .../execgen/cmd/execgen/vec_to_datum_gen.go | 14 +- ...verloads_util.go => overloads_bin_util.go} | 20 +- pkg/sql/colexec/hash_aggregator.go | 4 +- pkg/sql/colexec/hashjoiner_test.go | 2 +- pkg/sql/colexec/joiner_utils_test.go | 2 +- pkg/sql/colexec/not_expr_ops.go | 208 + pkg/sql/colexec/not_expr_ops_test.go | 164 + pkg/sql/colexec/ordered_aggregator.go | 4 +- pkg/sql/colexec/rowstovec.eg.go | 4 +- pkg/sql/colexec/rowstovec_test.go | 2 +- pkg/sql/colexec/rowstovec_tmpl.go | 6 +- pkg/sql/colexec/select_in.eg.go | 2 +- pkg/sql/colexec/select_in_tmpl.go | 2 +- pkg/sql/colexec/sort_partitioner.eg.go | 2 +- pkg/sql/colexec/tuple_proj_op.go | 19 +- pkg/sql/colexec/types_integration_test.go | 2 +- pkg/sql/colexec/values.go | 3 +- pkg/sql/colexecop/BUILD.bazel | 1 + pkg/sql/colfetcher/BUILD.bazel | 5 +- pkg/sql/colfetcher/cfetcher.go | 112 +- pkg/sql/colfetcher/cfetcher_setup.go | 9 +- pkg/sql/colfetcher/colbatch_scan.go | 10 +- pkg/sql/colfetcher/index_join.go | 106 +- pkg/sql/colflow/BUILD.bazel | 1 + pkg/sql/colflow/explain_vec.go | 2 +- pkg/sql/colflow/vectorized_flow.go | 8 +- pkg/sql/colmem/allocator.go | 18 +- pkg/sql/comment_on_constraint.go | 34 +- pkg/sql/comment_on_database.go | 44 +- pkg/sql/comment_on_index.go | 62 +- pkg/sql/comment_on_schema.go | 28 +- pkg/sql/comment_on_table.go | 35 +- pkg/sql/commenter/BUILD.bazel | 23 + pkg/sql/commenter/comment_updater.go | 178 + pkg/sql/commenter/comment_updater_factory.go | 53 + pkg/sql/conn_executor.go | 5 +- pkg/sql/conn_executor_exec.go | 44 +- pkg/sql/conn_io.go | 30 + pkg/sql/copy.go | 3 +- pkg/sql/copy_file_upload.go | 3 +- pkg/sql/crdb_internal.go | 27 +- pkg/sql/create_index.go | 73 +- pkg/sql/create_role.go | 2 +- pkg/sql/create_schema.go | 49 +- pkg/sql/create_table.go | 49 +- pkg/sql/delete.go | 7 +- pkg/sql/delete_preserving_index_test.go | 2 +- pkg/sql/delete_range.go 
| 3 +- pkg/sql/descriptor.go | 9 +- pkg/sql/distsql/columnar_operators_test.go | 8 +- pkg/sql/distsql/server.go | 27 +- pkg/sql/distsql_physical_planner.go | 98 +- pkg/sql/distsql_plan_scrub_physical.go | 53 - pkg/sql/distsql_plan_stats.go | 2 +- pkg/sql/distsql_running.go | 12 +- pkg/sql/distsql_spec_exec_factory.go | 14 +- pkg/sql/drop_index.go | 9 +- pkg/sql/drop_schema.go | 2 +- pkg/sql/event_log.go | 20 +- pkg/sql/exec_factory_util.go | 27 +- pkg/sql/exec_util.go | 18 +- pkg/sql/execinfra/BUILD.bazel | 6 +- pkg/sql/execinfra/flow_context.go | 60 +- pkg/sql/execinfra/processorsbase.go | 2 +- pkg/sql/execinfra/server_config.go | 3 + pkg/sql/execinfra/testutils.go | 3 +- pkg/sql/execinfra/utils.go | 6 +- pkg/sql/execinfra/version.go | 13 +- pkg/sql/execinfrapb/BUILD.bazel | 4 +- pkg/sql/execinfrapb/expr.go | 10 +- pkg/sql/execinfrapb/flow_diagram.go | 3 +- pkg/sql/execinfrapb/processors.go | 48 +- pkg/sql/execinfrapb/processors_bulk_io.proto | 3 +- pkg/sql/execinfrapb/processors_sql.proto | 43 +- pkg/sql/explain_ddl.go | 5 +- pkg/sql/explain_vec.go | 5 +- pkg/sql/faketreeeval/evalctx.go | 4 +- pkg/sql/flowinfra/BUILD.bazel | 2 + pkg/sql/flowinfra/cluster_test.go | 4 +- pkg/sql/flowinfra/flow.go | 6 +- pkg/sql/flowinfra/stream_encoder.go | 3 +- pkg/sql/indexbackfiller_test.go | 4 +- pkg/sql/information_schema.go | 13 +- pkg/sql/insert_fast_path.go | 2 +- pkg/sql/internal_test.go | 4 + pkg/sql/inverted/expression.go | 2 +- pkg/sql/job_exec_context.go | 6 +- pkg/sql/job_exec_context_test_util.go | 4 +- pkg/sql/logictest/BUILD.bazel | 1 + pkg/sql/logictest/logic.go | 33 +- .../testdata/logic_test/alter_primary_key | 21 +- .../auto_span_config_reconciliation_job | 2 +- pkg/sql/logictest/testdata/logic_test/cast | 627 +- .../testdata/logic_test/cluster_settings | 18 + .../testdata/logic_test/create_table | 3 +- .../testdata/logic_test/distsql_enum | 60 +- .../logictest/testdata/logic_test/drop_index | 2 +- .../testdata/logic_test/hash_sharded_index | 174 +- .../testdata/logic_test/information_schema | 2 + pkg/sql/logictest/testdata/logic_test/insert | 24 + .../testdata/logic_test/new_schema_changer | 83 +- .../logictest/testdata/logic_test/pg_catalog | 6 + .../testdata/logic_test/privilege_builtins | 2 +- pkg/sql/logictest/testdata/logic_test/role | 19 +- pkg/sql/logictest/testdata/logic_test/schema | 2 +- pkg/sql/logictest/testdata/logic_test/scrub | 7 +- pkg/sql/logictest/testdata/logic_test/set | 36 + .../logictest/testdata/logic_test/show_source | 2 + pkg/sql/logictest/testdata/logic_test/upsert | 25 + .../logictest/testdata/logic_test/zone_config | 2 +- .../logic_test/zone_config_system_tenant | 2 +- pkg/sql/memsize/BUILD.bazel | 2 +- pkg/sql/memsize/constants.go | 2 +- pkg/sql/opt/constraint/constraint.go | 30 +- pkg/sql/opt/constraint/constraint_test.go | 49 + pkg/sql/opt/distribution/BUILD.bazel | 31 + pkg/sql/opt/distribution/distribution.go | 144 + pkg/sql/opt/distribution/distribution_test.go | 108 + pkg/sql/opt/exec/execbuilder/relational.go | 20 + .../opt/exec/execbuilder/testdata/distsql_agg | 2 + pkg/sql/opt/exec/execbuilder/testdata/enums | 10 + pkg/sql/opt/exec/execbuilder/testdata/explain | 76 +- .../exec/execbuilder/testdata/inverted_index | 12 + pkg/sql/opt/exec/execbuilder/testdata/stats | 14 + pkg/sql/opt/invertedexpr/geo_expression.go | 6 +- pkg/sql/opt/invertedidx/geo.go | 2 +- pkg/sql/opt/invertedidx/json_array.go | 2 +- pkg/sql/opt/memo/BUILD.bazel | 4 +- pkg/sql/opt/memo/expr_format.go | 9 + pkg/sql/opt/memo/group.go | 6 +- pkg/sql/opt/memo/interner.go | 11 +- 
pkg/sql/opt/memo/interner_test.go | 20 + pkg/sql/opt/memo/memo.go | 2 +- pkg/sql/opt/memo/testdata/memo | 2 +- pkg/sql/opt/norm/BUILD.bazel | 4 +- pkg/sql/opt/norm/general_funcs.go | 2 +- pkg/sql/opt/norm/groupby_funcs.go | 4 +- .../opt/norm/testdata/rules/fold_constants | 1 + pkg/sql/opt/ops/enforcer.opt | 12 + pkg/sql/opt/optbuilder/fk_cascade.go | 4 +- pkg/sql/opt/optbuilder/insert.go | 92 +- pkg/sql/opt/optbuilder/mutation_builder.go | 168 - .../optbuilder/mutation_builder_arbiter.go | 91 +- pkg/sql/opt/optbuilder/scope.go | 5 +- .../optbuilder/testdata/fk-on-update-cascade | 329 +- .../testdata/fk-on-update-set-default | 228 +- .../opt/optbuilder/testdata/partial-indexes | 6 + .../optbuilder/testdata/unique-checks-insert | 63 + .../optbuilder/testdata/unique-checks-upsert | 79 + pkg/sql/opt/optbuilder/testdata/upsert | 1559 +- pkg/sql/opt/optbuilder/update.go | 45 +- pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go | 4 +- pkg/sql/opt/optgen/cmd/optgen/testdata/exprs | 4 +- pkg/sql/opt/optgen/exprgen/expr_gen.go | 2 +- pkg/sql/opt/ordering/BUILD.bazel | 1 + pkg/sql/opt/ordering/distribute.go | 34 + pkg/sql/opt/ordering/lookup_join.go | 11 +- pkg/sql/opt/ordering/ordering.go | 5 + pkg/sql/opt/props/BUILD.bazel | 2 +- pkg/sql/opt/props/histogram.go | 4 +- pkg/sql/opt/props/physical/BUILD.bazel | 17 +- pkg/sql/opt/props/physical/distribution.go | 240 + .../opt/props/physical/distribution_test.go | 201 + pkg/sql/opt/props/physical/provided.go | 29 +- pkg/sql/opt/props/physical/required.go | 16 +- pkg/sql/opt/xform/BUILD.bazel | 1 + pkg/sql/opt/xform/coster.go | 11 + pkg/sql/opt/xform/optimizer.go | 12 +- pkg/sql/opt/xform/physical_props.go | 10 +- pkg/sql/opt/xform/testdata/coster/zone | 157 +- pkg/sql/opt/xform/testdata/external/tpce | 20 +- .../opt/xform/testdata/external/tpce-no-stats | 20 +- pkg/sql/opt/xform/testdata/external/trading | 108 +- .../xform/testdata/external/trading-mutation | 124 +- .../opt/xform/testdata/physprops/distribution | 271 + pkg/sql/opt/xform/testdata/physprops/ordering | 112 +- pkg/sql/opt/xform/testdata/rules/groupby | 8 +- pkg/sql/opt/xform/testdata/rules/join | 171 +- pkg/sql/opt/xform/testdata/rules/join_order | 4 +- pkg/sql/opt/xform/testdata/rules/scan | 104 +- pkg/sql/opt/xform/testdata/rules/select | 14 +- pkg/sql/opt/xform/testdata/rules/set | 2 +- pkg/sql/opt_catalog.go | 17 +- pkg/sql/opt_exec_factory.go | 21 +- pkg/sql/parser/parse_test.go | 3 +- pkg/sql/parser/sql.y | 50 +- pkg/sql/parser/testdata/create_view | 16 + pkg/sql/parser/testdata/upsert | 16 + pkg/sql/partition.go | 2 +- pkg/sql/partition_utils.go | 4 +- pkg/sql/pg_catalog.go | 23 +- pkg/sql/pgwire/BUILD.bazel | 4 +- pkg/sql/pgwire/encoding_test.go | 2 +- pkg/sql/pgwire/testdata/pgtest/notice | 2 +- pkg/sql/pgwire/types.go | 5 +- pkg/sql/physicalplan/BUILD.bazel | 1 + pkg/sql/physicalplan/aggregator_funcs_test.go | 4 +- pkg/sql/planhook.go | 2 +- pkg/sql/planner.go | 15 +- pkg/sql/planner_test.go | 3 +- pkg/sql/privilege/privilege.go | 11 + pkg/sql/randgen/BUILD.bazel | 3 +- pkg/sql/randgen/datum.go | 10 +- pkg/sql/randgen/mutator.go | 7 +- pkg/sql/randgen/schema.go | 2 +- pkg/sql/randgen/type.go | 4 +- pkg/sql/region_util.go | 35 +- pkg/sql/region_util_test.go | 97 +- pkg/sql/resolver.go | 107 +- pkg/sql/roleoption/option_string.go | 6 +- pkg/sql/roleoption/role_option.go | 10 +- pkg/sql/row/BUILD.bazel | 9 +- pkg/sql/row/errors.go | 7 +- pkg/sql/row/fetcher.go | 287 +- pkg/sql/row/fetcher_mvcc_test.go | 6 +- pkg/sql/row/fetcher_test.go | 21 +- pkg/sql/row/helper.go | 5 +- 
pkg/sql/row/inserter.go | 9 +- pkg/sql/row/kv_batch_streamer.go | 215 + pkg/sql/row/kv_fetcher.go | 11 +- pkg/sql/row/row_converter.go | 3 +- pkg/sql/row/updater.go | 8 +- pkg/sql/row/writer.go | 6 +- pkg/sql/row_source_to_plan_node.go | 2 +- pkg/sql/rowcontainer/disk_row_container.go | 5 +- .../rowcontainer/disk_row_container_test.go | 6 +- pkg/sql/rowcontainer/hash_row_container.go | 4 +- .../rowcontainer/hash_row_container_test.go | 7 +- .../rowcontainer/numbered_row_container.go | 2 +- pkg/sql/rowcontainer/row_container.go | 6 +- pkg/sql/rowcontainer/row_container_test.go | 4 +- pkg/sql/rowenc/BUILD.bazel | 28 +- pkg/sql/rowenc/column_type_encoding.go | 1471 -- pkg/sql/rowenc/encoded_datum.go | 40 +- pkg/sql/rowenc/encoded_datum_test.go | 21 +- pkg/sql/rowenc/index_encoding.go | 43 +- pkg/sql/rowenc/index_encoding_test.go | 352 +- pkg/sql/rowenc/keyside/BUILD.bazel | 47 + pkg/sql/rowenc/keyside/array.go | 80 + pkg/sql/rowenc/keyside/decode.go | 284 + pkg/sql/rowenc/keyside/doc.go | 18 + pkg/sql/rowenc/keyside/encode.go | 177 + .../keyside_test.go} | 227 +- pkg/sql/rowenc/partition.go | 10 +- pkg/sql/rowenc/valueside/BUILD.bazel | 54 + pkg/sql/rowenc/valueside/array.go | 313 + pkg/sql/rowenc/valueside/array_test.go | 152 + pkg/sql/rowenc/valueside/decode.go | 275 + pkg/sql/rowenc/valueside/doc.go | 28 + pkg/sql/rowenc/valueside/encode.go | 120 + pkg/sql/rowenc/valueside/legacy.go | 363 + pkg/sql/rowenc/valueside/tuple.go | 60 + pkg/sql/rowenc/valueside/valueside_test.go | 332 + pkg/sql/rowexec/BUILD.bazel | 6 +- pkg/sql/rowexec/aggregator.go | 6 +- pkg/sql/rowexec/bulk_row_writer.go | 4 +- pkg/sql/rowexec/columnbackfiller.go | 2 +- pkg/sql/rowexec/distinct.go | 3 +- pkg/sql/rowexec/indexbackfiller.go | 2 +- pkg/sql/rowexec/inverted_filterer.go | 2 +- pkg/sql/rowexec/inverted_joiner.go | 25 +- pkg/sql/rowexec/joinerbase.go | 2 +- pkg/sql/rowexec/joinreader.go | 213 +- pkg/sql/rowexec/joinreader_test.go | 3 + pkg/sql/rowexec/processor_utils_test.go | 3 +- pkg/sql/rowexec/processors.go | 6 +- pkg/sql/rowexec/project_set.go | 2 +- pkg/sql/rowexec/project_set_test.go | 2 + pkg/sql/rowexec/rowfetcher.go | 10 +- pkg/sql/rowexec/sample_aggregator.go | 4 +- pkg/sql/rowexec/sample_aggregator_test.go | 2 +- pkg/sql/rowexec/sampler.go | 4 +- pkg/sql/rowexec/scrub_tablereader.go | 265 - pkg/sql/rowexec/sorter.go | 3 +- pkg/sql/rowexec/stream_group_accumulator.go | 2 +- pkg/sql/rowexec/stream_merger.go | 4 +- pkg/sql/rowexec/tablereader.go | 12 +- pkg/sql/rowexec/tablereader_test.go | 2 +- pkg/sql/rowexec/utils_test.go | 6 +- pkg/sql/rowexec/values_test.go | 2 +- pkg/sql/rowexec/windower.go | 2 +- pkg/sql/rowexec/zigzagjoiner.go | 32 +- pkg/sql/rowflow/BUILD.bazel | 1 + pkg/sql/rowflow/input_sync.go | 2 +- pkg/sql/rowflow/routers.go | 5 +- pkg/sql/rowflow/routers_test.go | 6 +- pkg/sql/rowflow/row_based_flow.go | 2 +- pkg/sql/scan.go | 121 +- pkg/sql/scatter_test.go | 17 + pkg/sql/schema_change_plan_node.go | 28 +- pkg/sql/schema_changer.go | 15 +- pkg/sql/schema_changer_state.go | 2 +- pkg/sql/schema_changer_test.go | 44 +- pkg/sql/schemachanger/end_to_end_test.go | 4 +- pkg/sql/schemachanger/scbuild/BUILD.bazel | 4 +- .../schemachanger/scbuild/ast_annotator.go | 88 + pkg/sql/schemachanger/scbuild/build.go | 74 +- .../schemachanger/scbuild/builder_state.go | 28 +- pkg/sql/schemachanger/scbuild/builder_test.go | 22 +- .../scbuild/internal/scbuildstmt/BUILD.bazel | 2 + .../internal/scbuildstmt/alter_table.go | 9 +- .../internal/scbuildstmt/common_relation.go | 71 +- 
.../internal/scbuildstmt/common_util.go | 2 +- .../internal/scbuildstmt/create_index.go | 10 +- .../internal/scbuildstmt/dependencies.go | 46 +- .../internal/scbuildstmt/drop_database.go | 6 +- .../internal/scbuildstmt/drop_schema.go | 9 +- .../internal/scbuildstmt/drop_sequence.go | 9 +- .../internal/scbuildstmt/drop_table.go | 43 +- .../scbuild/internal/scbuildstmt/drop_type.go | 6 +- .../scbuild/internal/scbuildstmt/drop_view.go | 20 +- .../scbuild/internal/scbuildstmt/process.go | 1 - .../schemachanger/scbuild/name_resolver.go | 5 +- .../scbuild/table_element_id_generator.go | 2 +- ...cker.go => target_enqueuer_and_checker.go} | 28 +- .../scbuild/testdata/drop_database | 122 + .../scbuild/testdata/drop_schema | 76 + .../scbuild/testdata/drop_sequence | 10 + .../schemachanger/scbuild/testdata/drop_table | 78 + .../schemachanger/scbuild/testdata/drop_view | 30 + pkg/sql/schemachanger/scdeps/build_deps.go | 7 + pkg/sql/schemachanger/scdeps/exec_deps.go | 19 +- pkg/sql/schemachanger/scdeps/run_deps.go | 55 +- .../scdeps/sctestdeps/BUILD.bazel | 1 + .../scdeps/sctestdeps/test_deps.go | 60 +- .../scdeps/sctestdeps/test_state.go | 22 + .../scdeps/sctestutils/BUILD.bazel | 1 - .../scdeps/sctestutils/sctestutils.go | 12 +- pkg/sql/schemachanger/scexec/BUILD.bazel | 5 +- pkg/sql/schemachanger/scexec/dependencies.go | 34 +- pkg/sql/schemachanger/scexec/exec_mutation.go | 173 +- .../scexec/executor_external_test.go | 220 +- .../scexec/mocks_generated_test.go | 24 +- .../scexec/scmutationexec/BUILD.bazel | 2 + .../scexec/scmutationexec/scmutationexec.go | 96 +- pkg/sql/schemachanger/schemachanger_test.go | 2 +- pkg/sql/schemachanger/scjob/job.go | 12 +- pkg/sql/schemachanger/scop/BUILD.bazel | 1 + pkg/sql/schemachanger/scop/mutation.go | 65 +- .../scop/mutation_visitor_generated.go | 36 + pkg/sql/schemachanger/scpb/BUILD.bazel | 14 +- .../scpb/constants.go} | 12 +- .../schemachanger/scpb/element_generator.go | 8 +- pkg/sql/schemachanger/scpb/elements.proto | 408 + .../schemachanger/scpb/elements_generated.go | 210 +- pkg/sql/schemachanger/scpb/metadata.go | 54 - pkg/sql/schemachanger/scpb/scpb.proto | 368 +- .../schemachanger/scpb/{node.go => state.go} | 44 +- pkg/sql/schemachanger/scpb/uml/table.puml | 46 + pkg/sql/schemachanger/scplan/BUILD.bazel | 17 +- .../{ => internal}/deprules/BUILD.bazel | 4 +- .../scplan/{ => internal}/deprules/helpers.go | 0 .../{ => internal}/deprules/registry.go | 10 +- .../scplan/{ => internal}/deprules/rules.go | 10 +- .../{ => internal}/deprules/rules_test.go | 0 .../{ => internal}/deprules/testdata/rules | 176 +- .../scplan/{ => internal}/opgen/BUILD.bazel | 11 +- .../scplan/{ => internal}/opgen/op_funcs.go | 26 +- .../scplan/{ => internal}/opgen/op_gen.go | 20 +- .../opgen/opgen_check_constraint.go | 0 .../opgen_check_constraint_type_reference.go | 0 .../{ => internal}/opgen/opgen_column.go | 18 +- .../internal/opgen/opgen_column_comment.go | 40 + .../{ => internal}/opgen/opgen_column_name.go | 0 .../opgen/opgen_column_type_reference.go | 0 .../opgen_computed_expr_type_reference.go | 0 .../opgen/opgen_constraint_comment.go | 41 + .../opgen/opgen_constraint_name.go | 0 .../{ => internal}/opgen/opgen_database.go | 9 +- .../internal/opgen/opgen_database_comment.go | 39 + .../opgen/opgen_db_schema_entry.go | 0 .../opgen_default_expr_type_reference.go | 0 .../opgen/opgen_default_expression.go | 0 .../opgen/opgen_in_foreign_key.go | 0 .../internal/opgen/opgen_index_comment.go | 40 + .../{ => internal}/opgen/opgen_index_name.go | 0 .../{ => 
internal}/opgen/opgen_locality.go | 0 .../{ => internal}/opgen/opgen_namespace.go | 0 .../opgen_on_update_expr_type_reference.go | 0 .../opgen/opgen_out_foreign_key.go | 0 .../{ => internal}/opgen/opgen_owner.go | 0 .../opgen/opgen_partitioning.go | 0 .../opgen/opgen_primary_index.go | 0 .../opgen/opgen_relation_depended_on_by.go | 0 .../{ => internal}/opgen/opgen_schema.go | 9 +- .../internal/opgen/opgen_schema_comment.go | 39 + .../opgen/opgen_secondary_index.go | 0 .../{ => internal}/opgen/opgen_sequence.go | 9 +- .../opgen/opgen_sequence_dependency.go | 0 .../opgen/opgen_sequence_owned_by.go | 0 .../{ => internal}/opgen/opgen_table.go | 9 +- .../internal/opgen/opgen_table_comment.go | 39 + .../scplan/{ => internal}/opgen/opgen_type.go | 9 +- .../opgen/opgen_unique_constraint.go | 0 .../opgen/opgen_user_privileges.go | 0 .../scplan/{ => internal}/opgen/opgen_view.go | 9 +- .../opgen/opgen_view_depends_on_type.go | 0 .../scplan/{ => internal}/opgen/register.go | 0 .../{ => internal}/opgen/register_test.go | 0 .../scplan/{ => internal}/opgen/specs.go | 0 .../scplan/{ => internal}/opgen/target.go | 6 +- .../{ => scplan/internal}/scgraph/BUILD.bazel | 3 +- .../internal}/scgraph/dep_edge_tree.go | 10 +- .../internal}/scgraph/dep_edge_tree_test.go | 41 +- .../internal}/scgraph/depedgekind_string.go | 0 .../{ => scplan/internal}/scgraph/edge.go | 19 +- .../{ => scplan/internal}/scgraph/graph.go | 102 +- .../internal}/scgraph/graph_test.go | 52 +- .../internal}/scgraph/iteration.go | 7 +- .../internal}/scgraphviz/BUILD.bazel | 7 +- .../internal}/scgraphviz/graphviz.go | 118 +- .../scplan/{ => internal}/scopt/BUILD.bazel | 4 +- .../scplan/{ => internal}/scopt/registry.go | 8 +- .../scplan/{ => internal}/scopt/rules.go | 0 .../scplan/{ => internal}/scstage/BUILD.bazel | 4 +- .../scplan/{ => internal}/scstage/build.go | 217 +- .../scplan/{ => internal}/scstage/stage.go | 113 +- pkg/sql/schemachanger/scplan/plan.go | 61 +- pkg/sql/schemachanger/scplan/plan_test.go | 41 +- .../schemachanger/scplan/testdata/alter_table | 155 +- .../scplan/testdata/create_index | 21 +- .../scplan/testdata/drop_database | 310 +- .../schemachanger/scplan/testdata/drop_schema | 213 +- .../scplan/testdata/drop_sequence | 54 +- .../schemachanger/scplan/testdata/drop_table | 104 +- .../schemachanger/scplan/testdata/drop_type | 38 +- .../schemachanger/scplan/testdata/drop_view | 130 +- pkg/sql/schemachanger/screl/BUILD.bazel | 1 + pkg/sql/schemachanger/screl/attr.go | 38 +- pkg/sql/schemachanger/screl/attr_string.go | 6 +- pkg/sql/schemachanger/screl/compare.go | 2 +- pkg/sql/schemachanger/screl/format.go | 6 +- .../screl/node.go} | 14 +- pkg/sql/schemachanger/screl/query_test.go | 25 +- pkg/sql/schemachanger/screl/scalars.go | 6 +- pkg/sql/schemachanger/scrun/BUILD.bazel | 3 - pkg/sql/schemachanger/scrun/scrun.go | 87 +- .../testdata/alter_table_add_column | 2 +- pkg/sql/schemachanger/testdata/drop | 51 +- pkg/sql/scrub.go | 58 +- pkg/sql/scrub_physical.go | 200 - pkg/sql/scrub_test.go | 398 +- pkg/sql/sem/builtins/BUILD.bazel | 4 +- pkg/sql/sem/builtins/aggregate_builtins.go | 60 +- pkg/sql/sem/builtins/builtins.go | 77 +- pkg/sql/sem/builtins/math_builtins.go | 2 +- pkg/sql/sem/builtins/pg_builtins.go | 362 +- pkg/sql/sem/builtins/replication_builtins.go | 45 +- pkg/sql/sem/builtins/window_frame_builtins.go | 2 +- pkg/sql/sem/catid/BUILD.bazel | 8 + pkg/sql/sem/catid/ids.go | 36 + pkg/sql/sem/tree/BUILD.bazel | 7 +- pkg/sql/sem/tree/as_of.go | 2 +- pkg/sql/sem/tree/cast.go | 628 +- pkg/sql/sem/tree/cast_test.go | 232 +- 
pkg/sql/sem/tree/constant_test.go | 2 +- pkg/sql/sem/tree/datum.go | 29 +- pkg/sql/{rowenc => sem/tree}/datum_alloc.go | 163 +- pkg/sql/sem/tree/decimal.go | 2 +- pkg/sql/sem/tree/eval.go | 53 +- pkg/sql/sem/tree/insert.go | 16 +- pkg/sql/sem/tree/pretty.go | 25 +- pkg/sql/sem/tree/select.go | 3 +- pkg/sql/sem/tree/table_ref.go | 12 +- pkg/sql/sem/tree/testdata/pg_cast_dump.csv | 156 + .../testdata/pg_cast_provolatile_dump.csv | 153 - pkg/sql/sem/tree/type_check.go | 20 +- pkg/sql/sem/tree/window_funcs_test.go | 2 +- pkg/sql/set_cluster_setting.go | 2 +- pkg/sql/set_var.go | 2 +- pkg/sql/show_cluster_setting.go | 4 +- pkg/sql/show_create.go | 3 +- pkg/sql/show_create_clauses.go | 14 +- pkg/sql/show_test.go | 10 +- pkg/sql/span/BUILD.bazel | 1 + pkg/sql/span/span_builder.go | 22 +- pkg/sql/split.go | 6 +- pkg/sql/sqlerrors/errors.go | 5 + .../sqlinstance/instancestorage/BUILD.bazel | 2 +- .../sqlinstance/instancestorage/row_codec.go | 85 +- .../sqlstats/persistedsqlstats/BUILD.bazel | 2 +- .../persistedsqlstats/cluster_settings.go | 4 +- .../scheduled_sql_stats_compaction_test.go | 6 +- .../sqlstatsutil/BUILD.bazel | 2 +- .../sqlstatsutil/json_impl.go | 2 +- pkg/sql/stats/BUILD.bazel | 1 + pkg/sql/stats/histogram.go | 4 +- pkg/sql/stats/json.go | 7 +- pkg/sql/stats/row_sampling.go | 2 +- pkg/sql/stats/stats_cache.go | 9 +- pkg/sql/stmtdiagnostics/BUILD.bazel | 1 + pkg/sql/tablewriter_delete.go | 3 +- pkg/sql/tests/BUILD.bazel | 1 + pkg/sql/tests/insert_fast_path_test.go | 73 + pkg/sql/tests/system_table_test.go | 15 - pkg/sql/tests/truncate_test.go | 60 +- pkg/sql/truncate.go | 3 +- pkg/sql/type_change.go | 5 +- pkg/sql/unsupported_vars.go | 4 +- pkg/sql/update.go | 7 +- pkg/sql/values_test.go | 2 +- pkg/sql/vars.go | 6 + pkg/sql/zone_config.go | 5 +- pkg/storage/metamorphic/operands.go | 29 + pkg/storage/metamorphic/operations.go | 51 +- pkg/storage/mvcc.go | 2 +- pkg/storage/mvcc_test.go | 2 +- pkg/storage/pebble_mvcc_scanner.go | 14 +- pkg/storage/sst.go | 54 +- pkg/storage/sst_iterator.go | 19 +- .../conditional_put_with_txn_enable_separated | 2 +- .../conditional_put_write_too_old | 4 +- .../testdata/mvcc_histories/delete_range | 2 +- pkg/storage/testdata/mvcc_histories/increment | 4 +- .../mvcc_histories/read_fail_on_more_recent | 18 +- .../update_existing_key_old_version | 4 +- .../testdata/mvcc_histories/write_too_old | 4 +- pkg/streaming/BUILD.bazel | 2 +- pkg/streaming/api.go | 12 +- pkg/testutils/echotest/BUILD.bazel | 9 + pkg/testutils/echotest/echotest.go | 44 + pkg/testutils/keysutils/BUILD.bazel | 2 +- pkg/testutils/keysutils/pretty_scanner.go | 4 +- pkg/testutils/lint/lint_test.go | 4 +- pkg/testutils/skip/BUILD.bazel | 1 + pkg/testutils/skip/skip.go | 9 + .../src/sessions/sessionDetails.tsx | 2 +- .../src/sql/sqlhighlight.module.scss | 1 - .../statementDetails.fixture.ts | 38 +- .../statementDetails.selectors.ts | 47 +- .../statementsPage/statementsPage.fixture.ts | 122 +- .../statementsPage.selectors.ts | 3 + .../src/statementsTable/statementsTable.tsx | 2 +- .../statementsTableContent.tsx | 12 +- .../cluster-ui/src/transactionsPage/utils.ts | 1 + .../cluster-ui/src/util/appStats/appStats.ts | 6 +- .../src/routes/RedirectToStatementDetails.tsx | 2 +- .../src/views/statements/statementDetails.tsx | 47 +- .../src/views/statements/statements.spec.tsx | 31 +- .../src/views/statements/statementsPage.tsx | 3 + pkg/util/duration/BUILD.bazel | 1 + pkg/util/duration/duration.go | 27 +- pkg/util/encoding/BUILD.bazel | 4 +- pkg/util/encoding/decimal.go | 2 +- 
pkg/util/encoding/decimal_test.go | 2 +- pkg/util/encoding/encoding.go | 4 +- pkg/util/encoding/encoding_test.go | 2 +- pkg/util/hlc/hlc.go | 10 + pkg/util/json/BUILD.bazel | 4 +- pkg/util/json/encode.go | 2 +- pkg/util/json/encoded.go | 2 +- pkg/util/json/json.go | 5 +- pkg/util/json/json_test.go | 2 +- pkg/util/mon/bytes_usage.go | 55 + pkg/workload/schemachange/BUILD.bazel | 2 +- .../schemachange/operation_generator.go | 22 +- scripts/sgrep | 16 + vendor | 2 +- 1089 files changed, 38501 insertions(+), 37016 deletions(-) create mode 100644 bazel-out rename build/{teamcity-nightly-pebble-common.sh => teamcity/cockroach/nightlies/pebble_nightly_common.sh} (65%) create mode 100755 build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh rename build/{teamcity-nightly-pebble-write-throughput.sh => teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh} (87%) create mode 100755 build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh rename build/{teamcity-nightly-pebble-ycsb.sh => teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh} (87%) create mode 100644 docs/RFCS/20211203_session_revival_token.md delete mode 100644 pkg/ccl/backupccl/import_spans_test.go create mode 100644 pkg/ccl/streamingccl/streamclient/partitioned_stream_client.go create mode 100644 pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go create mode 100644 pkg/ccl/streamingccl/streampb/empty.go create mode 100644 pkg/keys/commenttype_string.go create mode 100644 pkg/kv/kvclient/kvstreamer/BUILD.bazel create mode 100644 pkg/kv/kvclient/kvstreamer/avg_response_estimator.go create mode 100644 pkg/kv/kvclient/kvstreamer/avg_response_estimator_test.go create mode 100644 pkg/kv/kvclient/kvstreamer/budget.go create mode 100644 pkg/kv/kvclient/kvstreamer/main_test.go create mode 100644 pkg/kv/kvclient/kvstreamer/streamer.go create mode 100644 pkg/kv/kvclient/kvstreamer/streamer_test.go create mode 100644 pkg/kv/kvserver/client_replica_circuit_breaker_test.go create mode 100644 pkg/kv/kvserver/loqrecovery/testdata/invalid_input create mode 100644 pkg/kv/kvserver/loqrecovery/testdata/keyspace_coverage create mode 100644 pkg/kv/kvserver/loqrecovery/testdata/max_applied_voter_wins create mode 100644 pkg/kv/kvserver/replica_circuit_breaker.go create mode 100644 pkg/kv/kvserver/replica_circuit_breaker_test.go create mode 100644 pkg/kv/kvserver/testdata/replica_unavailable_error.txt create mode 100644 pkg/migration/migrations/public_schema_migration.go create mode 100644 pkg/migration/migrations/public_schema_migration_external_test.go create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.10.crt create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.10.key create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.11.crt create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.11.key create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.20.crt create mode 100644 pkg/security/securitytest/test_certs/tenant-signing.20.key create mode 100644 pkg/sql/backfill_test.go create mode 100644 pkg/sql/catalog/catpb/BUILD.bazel create mode 100644 pkg/sql/catalog/catpb/catalog.proto create mode 100644 pkg/sql/catalog/catpb/constraint.go create mode 100644 pkg/sql/catalog/catpb/doc.go rename pkg/sql/catalog/{descpb => catpb}/multiregion.go (83%) rename pkg/sql/colexec/execgen/{overloads_util.go => overloads_bin_util.go} (51%) create mode 100644 pkg/sql/colexec/not_expr_ops.go create mode 100644 
pkg/sql/colexec/not_expr_ops_test.go create mode 100644 pkg/sql/commenter/BUILD.bazel create mode 100644 pkg/sql/commenter/comment_updater.go create mode 100644 pkg/sql/commenter/comment_updater_factory.go delete mode 100644 pkg/sql/distsql_plan_scrub_physical.go create mode 100644 pkg/sql/opt/distribution/BUILD.bazel create mode 100644 pkg/sql/opt/distribution/distribution.go create mode 100644 pkg/sql/opt/distribution/distribution_test.go create mode 100644 pkg/sql/opt/ordering/distribute.go create mode 100644 pkg/sql/opt/props/physical/distribution.go create mode 100644 pkg/sql/opt/props/physical/distribution_test.go create mode 100644 pkg/sql/opt/xform/testdata/physprops/distribution create mode 100644 pkg/sql/row/kv_batch_streamer.go delete mode 100644 pkg/sql/rowenc/column_type_encoding.go create mode 100644 pkg/sql/rowenc/keyside/BUILD.bazel create mode 100644 pkg/sql/rowenc/keyside/array.go create mode 100644 pkg/sql/rowenc/keyside/decode.go create mode 100644 pkg/sql/rowenc/keyside/doc.go create mode 100644 pkg/sql/rowenc/keyside/encode.go rename pkg/sql/rowenc/{column_type_encoding_test.go => keyside/keyside_test.go} (61%) create mode 100644 pkg/sql/rowenc/valueside/BUILD.bazel create mode 100644 pkg/sql/rowenc/valueside/array.go create mode 100644 pkg/sql/rowenc/valueside/array_test.go create mode 100644 pkg/sql/rowenc/valueside/decode.go create mode 100644 pkg/sql/rowenc/valueside/doc.go create mode 100644 pkg/sql/rowenc/valueside/encode.go create mode 100644 pkg/sql/rowenc/valueside/legacy.go create mode 100644 pkg/sql/rowenc/valueside/tuple.go create mode 100644 pkg/sql/rowenc/valueside/valueside_test.go delete mode 100644 pkg/sql/rowexec/scrub_tablereader.go create mode 100644 pkg/sql/schemachanger/scbuild/ast_annotator.go rename pkg/sql/schemachanger/scbuild/{node_enqueuer_and_checker.go => target_enqueuer_and_checker.go} (55%) rename pkg/sql/{execinfra/scanbase.go => schemachanger/scpb/constants.go} (50%) create mode 100644 pkg/sql/schemachanger/scpb/elements.proto delete mode 100644 pkg/sql/schemachanger/scpb/metadata.go rename pkg/sql/schemachanger/scpb/{node.go => state.go} (59%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/BUILD.bazel (89%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/helpers.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/registry.go (81%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/rules.go (98%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/rules_test.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/deprules/testdata/rules (80%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/BUILD.bazel (85%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/op_funcs.go (66%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/op_gen.go (75%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_check_constraint.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_check_constraint_type_reference.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_column.go (85%) create mode 100644 pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_column_name.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_column_type_reference.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_computed_expr_type_reference.go (100%) create mode 100644 
pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_constraint_name.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_database.go (85%) create mode 100644 pkg/sql/schemachanger/scplan/internal/opgen/opgen_database_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_db_schema_entry.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_default_expr_type_reference.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_default_expression.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_in_foreign_key.go (100%) create mode 100644 pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_index_name.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_locality.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_namespace.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_on_update_expr_type_reference.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_out_foreign_key.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_owner.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_partitioning.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_primary_index.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_relation_depended_on_by.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_schema.go (85%) create mode 100644 pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_secondary_index.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_sequence.go (84%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_sequence_dependency.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_sequence_owned_by.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_table.go (85%) create mode 100644 pkg/sql/schemachanger/scplan/internal/opgen/opgen_table_comment.go rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_type.go (85%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_unique_constraint.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_user_privileges.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_view.go (85%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/opgen_view_depends_on_type.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/register.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/register_test.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/specs.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/opgen/target.go (96%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/BUILD.bazel (94%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/dep_edge_tree.go (86%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/dep_edge_tree_test.go (80%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/depedgekind_string.go (100%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/edge.go (89%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/graph.go (76%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/graph_test.go (75%) 
rename pkg/sql/schemachanger/{ => scplan/internal}/scgraph/iteration.go (86%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraphviz/BUILD.bazel (71%) rename pkg/sql/schemachanger/{ => scplan/internal}/scgraphviz/graphviz.go (73%) rename pkg/sql/schemachanger/scplan/{ => internal}/scopt/BUILD.bazel (81%) rename pkg/sql/schemachanger/scplan/{ => internal}/scopt/registry.go (87%) rename pkg/sql/schemachanger/scplan/{ => internal}/scopt/rules.go (100%) rename pkg/sql/schemachanger/scplan/{ => internal}/scstage/BUILD.bazel (85%) rename pkg/sql/schemachanger/scplan/{ => internal}/scstage/build.go (69%) rename pkg/sql/schemachanger/scplan/{ => internal}/scstage/stage.go (66%) rename pkg/sql/{rowenc/helpers_test.go => schemachanger/screl/node.go} (57%) delete mode 100644 pkg/sql/scrub_physical.go create mode 100644 pkg/sql/sem/catid/BUILD.bazel create mode 100644 pkg/sql/sem/catid/ids.go rename pkg/sql/{rowenc => sem/tree}/datum_alloc.go (67%) create mode 100644 pkg/sql/sem/tree/testdata/pg_cast_dump.csv delete mode 100644 pkg/sql/sem/tree/testdata/pg_cast_provolatile_dump.csv create mode 100644 pkg/sql/tests/insert_fast_path_test.go create mode 100644 pkg/testutils/echotest/BUILD.bazel create mode 100644 pkg/testutils/echotest/echotest.go create mode 100755 scripts/sgrep diff --git a/.bazelrc b/.bazelrc index 29f44012e841..46e9b0725c1c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -43,6 +43,8 @@ build:devdarwinx86_64 --platforms=//build/toolchains:darwin_x86_64 build:devdarwinx86_64 --config=dev build:dev --define cockroach_bazel_dev=y build:dev --stamp --workspace_status_command=./build/bazelutil/stamp.sh +build:dev --action_env=PATH +build:dev --host_action_env=PATH build:nonogo --define cockroach_nonogo=y # vi: ft=sh diff --git a/.gitignore b/.gitignore index 5bfcab4c56c9..096602e43a19 100644 --- a/.gitignore +++ b/.gitignore @@ -34,7 +34,6 @@ zcgo_flags*.go build/Railroad.jar # Bazel generated symlinks -/bazel-* /_bazel # Per-user .bazelrc diff --git a/AUTHORS b/AUTHORS index ff47c128af70..4083f525c9fa 100644 --- a/AUTHORS +++ b/AUTHORS @@ -83,6 +83,7 @@ Asit Mahato Austen McClernon Barry He bc +Ben Bardin Ben Darnell Bilal Akhtar Bill Cao @@ -161,6 +162,7 @@ fabio Faizan Qazi fangwens Francis Bergin +Fenil Patel funkygao Garvit Juniwal Gary Lau @@ -168,6 +170,7 @@ George Buckerfield georgebuckerfield George Utsin Georgia Hong +Gerardo Torres Gustav Paul Haines Chan hainesc Harshit Chopra @@ -275,6 +278,7 @@ Marcus Gartner Marcus Westin Marius Posta Marjan Ansar +Mark Sirek msirek Marko Bonaći Martin Bertschler Marylia Gutierrez @@ -300,6 +304,7 @@ Mufeez Amjad Namrata Kodali Nancy Vargas Balderas Nandu Pokhrel +Nate Long Nate Stewart Nate Nathan Johnson Nathan VanBenschoten <@cockroachlabs.com> diff --git a/BUILD.bazel b/BUILD.bazel index 158488af0cee..e86a6130a2c8 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -260,12 +260,8 @@ nogo( go_path( name = "go_path", - # TODO(ricky): change back to 'link' when https://github.com/bazelbuild/rules_go/issues/3041 - # is resolved. 
- mode = "copy", + mode = "link", deps = [ "//pkg/cmd/cockroach-short", - "//pkg/cmd/roachprod", - "//pkg/cmd/roachtest", ], ) diff --git a/DEPS.bzl b/DEPS.bzl index 2da414226dae..96da972c08c7 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -214,10 +214,10 @@ def go_deps(): name = "com_github_apache_thrift", build_file_proto_mode = "disable_global", importpath = "github.com/apache/thrift", - sha256 = "d75265e363da943c24e7ed69104bf018429024a50968421e48a6ab3e624733c2", - strip_prefix = "github.com/apache/thrift@v0.13.0", + sha256 = "f9e5418fda5dff9f5e1a892a127472fc621d417b3ee1351e53141509233fb1d5", + strip_prefix = "github.com/apache/thrift@v0.15.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/apache/thrift/com_github_apache_thrift-v0.13.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/apache/thrift/com_github_apache_thrift-v0.15.0.zip", ], ) go_repository( @@ -1111,13 +1111,13 @@ def go_deps(): ], ) go_repository( - name = "com_github_cockroachdb_apd_v2", + name = "com_github_cockroachdb_apd_v3", build_file_proto_mode = "disable_global", - importpath = "github.com/cockroachdb/apd/v2", - sha256 = "25204b49acbbf2f900f4f82b7f07481847f4f78452aff64dcbd05c543b21e35d", - strip_prefix = "github.com/cockroachdb/apd/v2@v2.0.2", + importpath = "github.com/cockroachdb/apd/v3", + sha256 = "c461aad0b12e37d042402e0ad2478092d846b1981b5708bb7a63fcc07a154816", + strip_prefix = "github.com/cockroachdb/apd/v3@v3.0.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/apd/v2/com_github_cockroachdb_apd_v2-v2.0.2.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/apd/v3/com_github_cockroachdb_apd_v3-v3.0.0.zip", ], ) go_repository( @@ -1178,10 +1178,10 @@ def go_deps(): patches = [ "@cockroach//build/patches:com_github_cockroachdb_errors.patch", ], - sha256 = "fa95cd3d43353cd585926053adf845fa6f2b039e1d97d14edd80355aec919da0", - strip_prefix = "github.com/cockroachdb/errors@v1.8.5", + sha256 = "564b50a67c19ee075899d7cbbdf9d60e1187394767acc2f72f9b34d0b4d041f0", + strip_prefix = "github.com/cockroachdb/errors@v1.8.6", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/errors/com_github_cockroachdb_errors-v1.8.5.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/errors/com_github_cockroachdb_errors-v1.8.6.zip", ], ) go_repository( @@ -1218,10 +1218,10 @@ def go_deps(): name = "com_github_cockroachdb_pebble", build_file_proto_mode = "disable_global", importpath = "github.com/cockroachdb/pebble", - sha256 = "00fb9a072e2ac79ba16462bcb58b892f0625a259b87bd84722cb47016706afd4", - strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220107174839-c75a2e96a7e8", + sha256 = "a3dd93db0e77ac631f48db4529fcd324eeec43a732a1717ce0a1819975e3732f", + strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220112164547-3d0ff924d13a", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220107174839-c75a2e96a7e8.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220112164547-3d0ff924d13a.zip", ], ) go_repository( @@ -1738,10 +1738,10 @@ def go_deps(): name = "com_github_datadog_zstd", build_file_proto_mode = "disable_global", importpath = "github.com/DataDog/zstd", - sha256 = "3ff4837fd63967e37a0b820559d80d448b47c4951c8df7e57045ed07055cb835", - strip_prefix = 
"github.com/DataDog/zstd@v1.4.8", + sha256 = "00989c1f3f3426aa6ed41f641193ff6a0d81031ba0b7fd81250e7e923d2f18a4", + strip_prefix = "github.com/DataDog/zstd@v1.5.0", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/DataDog/zstd/com_github_datadog_zstd-v1.4.8.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/DataDog/zstd/com_github_datadog_zstd-v1.5.0.zip", ], ) go_repository( @@ -2329,10 +2329,10 @@ def go_deps(): name = "com_github_fraugster_parquet_go", build_file_proto_mode = "disable_global", importpath = "github.com/fraugster/parquet-go", - sha256 = "cd372b4123f3ead83e1bdfd1f021d8286a8e28fb68d5884fa59657076a64f783", - strip_prefix = "github.com/fraugster/parquet-go@v0.4.0", + sha256 = "66beb8f2218c31ca4ca3d3dbcc91c256f3379750ade924016f4179982446edd7", + strip_prefix = "github.com/fraugster/parquet-go@v0.6.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/fraugster/parquet-go/com_github_fraugster_parquet_go-v0.4.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/fraugster/parquet-go/com_github_fraugster_parquet_go-v0.6.1.zip", ], ) go_repository( @@ -3403,16 +3403,6 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gordonklaus/ineffassign/com_github_gordonklaus_ineffassign-v0.0.0-20200309095847-7953dde2c7bf.zip", ], ) - go_repository( - name = "com_github_gorhill_cronexpr", - build_file_proto_mode = "disable_global", - importpath = "github.com/gorhill/cronexpr", - sha256 = "742d8957d3f9fe773150fb3164868a755b2af5b705b38c72c45ca5386715c617", - strip_prefix = "github.com/gorhill/cronexpr@v0.0.0-20180427100037-88b0669f7d75", - urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/gorhill/cronexpr/com_github_gorhill_cronexpr-v0.0.0-20180427100037-88b0669f7d75.zip", - ], - ) go_repository( name = "com_github_gorilla_context", build_file_proto_mode = "disable_global", @@ -4596,10 +4586,10 @@ def go_deps(): name = "com_github_klauspost_compress", build_file_proto_mode = "disable_global", importpath = "github.com/klauspost/compress", - sha256 = "e5b789e486f1e454ef02344c8235a1a4e285d7b8dd801ff8d667fce6309f3355", - strip_prefix = "github.com/klauspost/compress@v1.13.5", + sha256 = "fcc8b34572dd35f7f432ed228ce87718258fe62ee9ef09636227adb7d22d21ec", + strip_prefix = "github.com/klauspost/compress@v1.14.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/klauspost/compress/com_github_klauspost_compress-v1.13.5.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/klauspost/compress/com_github_klauspost_compress-v1.14.1.zip", ], ) go_repository( @@ -6444,6 +6434,16 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/retailnext/hllpp/com_github_retailnext_hllpp-v1.0.1-0.20180308014038-101a6d2f8b52.zip", ], ) + go_repository( + name = "com_github_robfig_cron_v3", + build_file_proto_mode = "disable_global", + importpath = "github.com/robfig/cron/v3", + sha256 = "ebe6454642220832a451b8cc50eae5f9150fd8d36b90b242a5de27676be86c70", + strip_prefix = "github.com/robfig/cron/v3@v3.0.1", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/robfig/cron/v3/com_github_robfig_cron_v3-v3.0.1.zip", + ], + ) go_repository( name = "com_github_rogpeppe_fastuuid", build_file_proto_mode = "disable_global", @@ -7528,10 +7528,10 @@ def go_deps(): name = "com_github_yuin_goldmark", build_file_proto_mode = "disable_global", importpath = 
"github.com/yuin/goldmark", - sha256 = "920012ef5c68245bc8477f4c8bbc79264032fb6d17035f06bc4c9955f9d7f6c2", - strip_prefix = "github.com/yuin/goldmark@v1.4.0", + sha256 = "ba4763c06e40aaa865a0b90a1603f0be13174cc880acfad9640089c6ca9bd086", + strip_prefix = "github.com/yuin/goldmark@v1.4.1", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/yuin/goldmark/com_github_yuin_goldmark-v1.4.0.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/yuin/goldmark/com_github_yuin_goldmark-v1.4.1.zip", ], ) go_repository( @@ -8368,6 +8368,16 @@ def go_deps(): "https://storage.googleapis.com/cockroach-godeps/gomod/go.opentelemetry.io/otel/exporters/zipkin/io_opentelemetry_go_otel_exporters_zipkin-v1.0.0-RC3.zip", ], ) + go_repository( + name = "io_opentelemetry_go_otel_metric", + build_file_proto_mode = "disable_global", + importpath = "go.opentelemetry.io/otel/metric", + sha256 = "d7ae3abbdcf9ea48ff23a477f324cb3595c77f3eb83f6acde5c0c9300e23fedb", + strip_prefix = "go.opentelemetry.io/otel/metric@v0.20.0", + urls = [ + "https://storage.googleapis.com/cockroach-godeps/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", + ], + ) go_repository( name = "io_opentelemetry_go_otel_sdk", build_file_proto_mode = "disable_global", @@ -8566,20 +8576,20 @@ def go_deps(): name = "org_golang_x_crypto", build_file_proto_mode = "disable_global", importpath = "golang.org/x/crypto", - sha256 = "c44b45db4eafc3270407fdd0b35be8f3d4219cf42277d42498644aec3fec4681", - strip_prefix = "golang.org/x/crypto@v0.0.0-20210817164053-32db794688a5", + sha256 = "eb2426a7891915213cc5da1da7b6fc6e9e2cf253d518d8e169e038e287f414e3", + strip_prefix = "golang.org/x/crypto@v0.0.0-20210921155107-089bfa567519", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.0.0-20210817164053-32db794688a5.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/crypto/org_golang_x_crypto-v0.0.0-20210921155107-089bfa567519.zip", ], ) go_repository( name = "org_golang_x_exp", build_file_proto_mode = "disable_global", importpath = "golang.org/x/exp", - sha256 = "5e56d6a6c434cc87c222eaf8a4ab44d5f1ea30741d388fa0097b51540ab5c898", - strip_prefix = "golang.org/x/exp@v0.0.0-20210514180818-737f94c0881e", + sha256 = "50e096afbb8e0f073519dd05f6573aefe410a829c87a7c1b64efb8c4a3948c50", + strip_prefix = "golang.org/x/exp@v0.0.0-20220104160115-025e73f80486", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/exp/org_golang_x_exp-v0.0.0-20210514180818-737f94c0881e.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/exp/org_golang_x_exp-v0.0.0-20220104160115-025e73f80486.zip", ], ) go_repository( @@ -8616,20 +8626,20 @@ def go_deps(): name = "org_golang_x_mod", build_file_proto_mode = "disable_global", importpath = "golang.org/x/mod", - sha256 = "8bed1489120e9428a64aca97c3a0d13065a33d1564593aae45e5c731c488f975", - strip_prefix = "golang.org/x/mod@v0.5.1", + sha256 = "6e5454f23b4ebc6c18c8db07bc168c71938269deb92c22c9ce4810903680fccb", + strip_prefix = "golang.org/x/mod@v0.6.0-dev.0.20211013180041-c96bc1413d57", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.5.1.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/mod/org_golang_x_mod-v0.6.0-dev.0.20211013180041-c96bc1413d57.zip", ], ) go_repository( name = "org_golang_x_net", build_file_proto_mode = "disable_global", importpath = "golang.org/x/net", 
- sha256 = "e03e4886f5d58569e3c3879df0cf72193a5d1c63acf08d6bda4c11761ee39437", - strip_prefix = "golang.org/x/net@v0.0.0-20210913180222-943fd674d43e", + sha256 = "1fe629b4c79e69ee7f7bfbcc813a984803744167bb04ce7fd2d9d7c4ca9d0af8", + strip_prefix = "golang.org/x/net@v0.0.0-20211015210444-4f30a5c0130f", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.0.0-20210913180222-943fd674d43e.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/net/org_golang_x_net-v0.0.0-20211015210444-4f30a5c0130f.zip", ], ) go_repository( @@ -8666,10 +8676,10 @@ def go_deps(): name = "org_golang_x_sys", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sys", - sha256 = "2a250d8b0de456c701e2abc6ade76a4c6daff5a2213811053b333a2edd560908", - strip_prefix = "golang.org/x/sys@v0.0.0-20211210111614-af8b64212486", + sha256 = "a6ae14d45975dcd8a49c0b65c87fa0bc8f5d26d455092e016adff15015de0aaa", + strip_prefix = "golang.org/x/sys@v0.0.0-20220111092808-5a964db01320", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.0.0-20211210111614-af8b64212486.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/sys/org_golang_x_sys-v0.0.0-20220111092808-5a964db01320.zip", ], ) go_repository( @@ -8706,10 +8716,10 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sha256 = "8e77ec3ce863221e4871e6cb3f0f148cd9f1c956446b4325abe6ec55262229db", - strip_prefix = "golang.org/x/tools@v0.1.7", + sha256 = "4ca90457aebec9b546bff39c89669c7a8185b70c7dc24248f5bb577c0041624b", + strip_prefix = "golang.org/x/tools@v0.1.8-0.20211029000441-d6a9af8af023", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.1.7.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/golang.org/x/tools/org_golang_x_tools-v0.1.8-0.20211029000441-d6a9af8af023.zip", ], ) go_repository( diff --git a/Makefile b/Makefile index d0d11c82ba5c..e7ef845c988d 100644 --- a/Makefile +++ b/Makefile @@ -928,7 +928,7 @@ test-targets := \ go-targets-ccl := \ $(COCKROACH) \ - bin/workload \ + bin/workload bin/roachprod bin/roachtest \ go-install \ bench benchshort \ check test testshort testslow testrace testraceslow testdeadlock testbuild \ diff --git a/WORKSPACE b/WORKSPACE index da7802dc2e40..9e610b38f489 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -13,12 +13,12 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # Load go bazel tools. This gives us access to the go bazel SDK/toolchains. http_archive( name = "io_bazel_rules_go", - sha256 = "adf650cf3dfded434e3a46c9063863579cdf8960a6e2f27e3e2537eb83a9413b", - strip_prefix = "rules_go-1ce9e349278b6e9ea1d52e0f73446c70ad440cbd", + sha256 = "5c4bd27429b1a307d51cd23d4677126aa6315fff608f0cd85c5bfb642a13b953", + strip_prefix = "cockroachdb-rules_go-23b381c", urls = [ - # cockroachdb/rules_go as of 1ce9e349278b6e9ea1d52e0f73446c70ad440cbd + # cockroachdb/rules_go as of 23b381cc8a679f5b10fd2459fc87997728b3740c # (upstream release-0.29 plus a few patches). 
- "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.29.0-0-1ce9e349.tar.gz", + "https://storage.googleapis.com/public-bazel-artifacts/bazel/cockroachdb-rules_go-v0.27.0-52-g23b381c.tar.gz", ], ) diff --git a/bazel-out b/bazel-out new file mode 100644 index 000000000000..6fd24fbd9518 --- /dev/null +++ b/bazel-out @@ -0,0 +1,3 @@ +This placeholder file prevents bazel from writing a symlink here which points to its output dir. +That symlink often causes tools that traverse the repo to get confused when they find lots of extra go files. +The _bazel/out symlink points to the same thing, but go doesn't traverse into paths that start with _. diff --git a/build/bazelutil/check.sh b/build/bazelutil/check.sh index cf5757200bb7..f5602ba93098 100755 --- a/build/bazelutil/check.sh +++ b/build/bazelutil/check.sh @@ -31,8 +31,8 @@ pkg/sql/schemachanger/scexec/exec_backfill_test.go://go:generate mockgen -packag pkg/sql/schemachanger/scop/backfill.go://go:generate go run ./generate_visitor.go scop Backfill backfill.go backfill_visitor_generated.go pkg/sql/schemachanger/scop/mutation.go://go:generate go run ./generate_visitor.go scop Mutation mutation.go mutation_visitor_generated.go pkg/sql/schemachanger/scop/validation.go://go:generate go run ./generate_visitor.go scop Validation validation.go validation_visitor_generated.go -pkg/sql/schemachanger/scpb/node.go://go:generate go run element_generator.go --in scpb.proto --out elements_generated.go -pkg/sql/schemachanger/scpb/node.go://go:generate go run element_uml_generator.go --out uml/table.puml +pkg/sql/schemachanger/scpb/state.go://go:generate go run element_generator.go --in elements.proto --out elements_generated.go +pkg/sql/schemachanger/scpb/state.go://go:generate go run element_uml_generator.go --out uml/table.puml pkg/util/interval/generic/doc.go: //go:generate ../../util/interval/generic/gen.sh *latch spanlatch pkg/util/interval/generic/example_t.go://go:generate ./gen.sh *example generic pkg/util/log/channels.go://go:generate go run gen/main.go logpb/log.proto channel.go channel/channel_generated.go diff --git a/build/release/teamcity-make-and-publish-build.sh b/build/release/teamcity-make-and-publish-build.sh index f4d24f5bea49..469cb0f9b3f2 100755 --- a/build/release/teamcity-make-and-publish-build.sh +++ b/build/release/teamcity-make-and-publish-build.sh @@ -11,9 +11,12 @@ build/builder.sh make .buildinfo/tag build_name="${TAG_NAME:-$(cat .buildinfo/tag)}" # On no match, `grep -Eo` returns 1. `|| echo""` makes the script not error. -release_branch="$(echo "$build_name" | grep -Eo "^v[0-9]+\.[0-9]+" || echo"")" +release_branch="$(echo "$TC_BUILD_BRANCH" || echo "")" is_custom_build="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^custombuild-" || echo "")" +# Prepend release branch onto build_name +build_name="${release_branch}-${build_name}" + if [[ -z "${DRY_RUN}" ]] ; then bucket="${BUCKET-cockroach-builds}" google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_COCKROACHDB_CREDENTIALS diff --git a/build/release/teamcity-mark-build.sh b/build/release/teamcity-mark-build.sh index 25acb1da858e..1547f42da95b 100755 --- a/build/release/teamcity-mark-build.sh +++ b/build/release/teamcity-mark-build.sh @@ -4,13 +4,13 @@ source "$(dirname "${0}")/teamcity-support.sh" # mark_build marks a build with a given label specified as a parameter on # docker. For example, calling this function on the label "qualified", on a -# v19.2.4 build would tag it as `latest-v19.2-qualified-build`. 
+# v19.2.4 build would tag it as `latest-release-19.2-qualified-build`. mark_build() { tc_start_block "Variable Setup" build_label=$1 # On no match, `grep -Eo` returns 1. `|| echo""` makes the script not error. - release_branch="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^v[0-9]+\.[0-9]+" || echo"")" + release_branch="$(echo "$TC_BUILD_BRANCH" | grep -Eo "^(release-[0-9]+\.[0-9]+)|(master)" || echo"")" if [[ -z "${DRY_RUN}" ]] ; then google_credentials=$GOOGLE_COCKROACH_CLOUD_IMAGES_COCKROACHDB_CREDENTIALS diff --git a/build/teamcity-nightly-pebble-common.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh similarity index 65% rename from build/teamcity-nightly-pebble-common.sh rename to build/teamcity/cockroach/nightlies/pebble_nightly_common.sh index 73ff5c201f2a..471153ac4310 100644 --- a/build/teamcity-nightly-pebble-common.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_common.sh @@ -23,25 +23,29 @@ fi artifacts=$PWD/artifacts mkdir -p "${artifacts}" chmod o+rwx "${artifacts}" - -# Disable global -json flag. -PATH=$PATH:$(GOFLAGS=; go env GOPATH)/bin -export PATH +mkdir -p "$PWD/bin" +chmod o+rwx "$PWD/bin" build_tag=$(git describe --abbrev=0 --tags --match=v[0-9]*) export build_tag # Build the roachtest binary. -make bin/roachtest +bazel build //pkg/cmd/roachtest --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) +cp $BAZEL_BIN/pkg/cmd/roachtest/roachtest_/roachtest bin +chmod a+w bin/roachtest # Pull in the latest version of Pebble from upstream. The benchmarks run -# against the tip of the 'master' branch. -rm -fr vendor/github.com/cockroachdb/pebble -git clone https://github.com/cockroachdb/pebble vendor/github.com/cockroachdb/pebble -pushd vendor/github.com/cockroachdb/pebble -GOOS=linux go build -v -mod=vendor -o pebble.linux ./cmd/pebble -popd -mv vendor/github.com/cockroachdb/pebble/pebble.linux . +# against the tip of the 'master' branch. We do this by `go get`ting the +# latest version of the module, and then running `mirror` to update `DEPS.bzl` +# accordingly. +bazel run @go_sdk//:bin/go get github.com/cockroachdb/pebble@latest +NEW_DEPS_BZL_CONTENT=$(bazel run //pkg/cmd/mirror) +echo "$NEW_DEPS_BZL_CONTENT" > DEPS.bzl +bazel build @com_github_cockroachdb_pebble//cmd/pebble --config ci -c opt +BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) +cp $BAZEL_BIN/external/com_github_cockroachdb_pebble/cmd/pebble/pebble_/pebble ./pebble.linux +chmod a+w ./pebble.linux # Set the location of the pebble binary. This is referenced by the roachtests, # which will push this binary out to all workers in order to run the @@ -66,7 +70,10 @@ function prepare_datadir() { # Build the mkbench tool from within the Pebble repo. This is used to parse # the benchmark data. function build_mkbench() { - go build -o mkbench github.com/cockroachdb/pebble/internal/mkbench + bazel build @com_github_cockroachdb_pebble//internal/mkbench --config ci -c opt + BAZEL_BIN=$(bazel info bazel-bin --config ci -c opt) + cp $BAZEL_BIN/external/com_github_cockroachdb_pebble/internal/mkbench/mkbench_/mkbench . + chmod a+w mkbench } # Sync all other data within the ./data/ directory. 
The runner logs aren't of diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh new file mode 100755 index 000000000000..7085e10dd01e --- /dev/null +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# This script runs the Pebble Nightly write-throughput benchmarks. +# +# It is run by the Pebble Nightly Write Throughput build +# configuration. + +set -eo pipefail + +dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" + +source "$dir/teamcity-support.sh" # For $root +source "$dir/teamcity-bazel-support.sh" # For run_bazel + +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILD_BRANCH -e TC_BUILD_ID" \ + run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh diff --git a/build/teamcity-nightly-pebble-write-throughput.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh similarity index 87% rename from build/teamcity-nightly-pebble-write-throughput.sh rename to build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh index 96245e442aac..5c7423cbb961 100755 --- a/build/teamcity-nightly-pebble-write-throughput.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_write_throughput_impl.sh @@ -1,9 +1,4 @@ #!/usr/bin/env bash -# -# This script runs the Pebble Nightly write-throughput benchmarks. -# -# It is run by the Pebble Nightly - AWS TeamCity build -# configuration. set -eo pipefail @@ -11,7 +6,7 @@ _dir="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" # Execute the common commands for the benchmark runs and import common # variables / constants. -. "$_dir/teamcity-nightly-pebble-common.sh" +. "$_dir/pebble_nightly_common.sh" # Run the write-throughput benchmark. # @@ -28,6 +23,7 @@ if ! timeout -s INT 12h bin/roachtest run \ --cockroach "true" \ --workload "true" \ --artifacts "$artifacts" \ + --artifacts-literal="${LITERAL_ARTIFACTS_DIR:-}" \ --parallelism 2 \ --teamcity \ --cpu-quota=384 \ @@ -49,3 +45,4 @@ aws s3 sync ./write-throughput s3://pebble-benchmarks/write-throughput sync_data_dir exit "$exit_status" + diff --git a/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh new file mode 100755 index 000000000000..2e30bc16f7cf --- /dev/null +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# This script runs the Pebble Nightly YCSB benchmarks. +# +# It is run by the Pebble Nightly YCSB TeamCity build +# configuration. 
+ +set -euo pipefail + +dir="$(dirname $(dirname $(dirname $(dirname "${0}"))))" + +source "$dir/teamcity-support.sh" # For $root +source "$dir/teamcity-bazel-support.sh" # For run_bazel + +BAZEL_SUPPORT_EXTRA_DOCKER_ARGS="-e LITERAL_ARTIFACTS_DIR=$root/artifacts -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e GOOGLE_EPHEMERAL_CREDENTIALS -e TC_BUILD_BRANCH -e TC_BUILD_ID" \ + run_bazel build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh diff --git a/build/teamcity-nightly-pebble-ycsb.sh b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh similarity index 87% rename from build/teamcity-nightly-pebble-ycsb.sh rename to build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh index 52e0879e2629..26f6ccfc4863 100755 --- a/build/teamcity-nightly-pebble-ycsb.sh +++ b/build/teamcity/cockroach/nightlies/pebble_nightly_ycsb_impl.sh @@ -1,16 +1,11 @@ #!/usr/bin/env bash -# -# This script runs the Pebble Nightly YCSB benchmarks. -# -# It is run by the Pebble Nightly - AWS TeamCity build -# configuration. set -eo pipefail _dir="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" # Execute the common commands for the benchmark runs. -. "$_dir/teamcity-nightly-pebble-common.sh" +. "$_dir/pebble_nightly_common.sh" # Run the YCSB benchmark. # @@ -27,6 +22,7 @@ if ! timeout -s INT $((1000*60)) bin/roachtest run \ --cockroach "true" \ --workload "true" \ --artifacts "$artifacts" \ + --artifacts-literal="${LITERAL_ARTIFACTS_DIR:-}" \ --parallelism 3 \ --teamcity \ --cpu-quota=384 \ diff --git a/cloud/kubernetes/bring-your-own-certs/client.yaml b/cloud/kubernetes/bring-your-own-certs/client.yaml index 632f7f388c6f..7a323a79e631 100644 --- a/cloud/kubernetes/bring-your-own-certs/client.yaml +++ b/cloud/kubernetes/bring-your-own-certs/client.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. command: diff --git a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml index 206692c22746..8f730c0f7f8f 100644 --- a/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml @@ -152,7 +152,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/client-secure.yaml b/cloud/kubernetes/client-secure.yaml index 425526df7a87..e5a111f308cb 100644 --- a/cloud/kubernetes/client-secure.yaml +++ b/cloud/kubernetes/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init-secure.yaml b/cloud/kubernetes/cluster-init-secure.yaml index 2abf463efb37..3141cb1e5d70 100644 --- a/cloud/kubernetes/cluster-init-secure.yaml +++ b/cloud/kubernetes/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/cluster-init.yaml b/cloud/kubernetes/cluster-init.yaml index 14b70f0f9fd9..d22614e27e1b 100644 --- a/cloud/kubernetes/cluster-init.yaml +++ b/cloud/kubernetes/cluster-init.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml index f364f3be8f80..42ec64fb3ef0 100644 --- a/cloud/kubernetes/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -194,7 +194,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/cockroachdb-statefulset.yaml b/cloud/kubernetes/cockroachdb-statefulset.yaml index 0ca9954b754c..06009e182cf6 100644 --- a/cloud/kubernetes/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -97,7 +97,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/multiregion/client-secure.yaml b/cloud/kubernetes/multiregion/client-secure.yaml index 32eea96a3222..b60057d52341 100644 --- a/cloud/kubernetes/multiregion/client-secure.yaml +++ b/cloud/kubernetes/multiregion/client-secure.yaml @@ -8,7 +8,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/cloud/kubernetes/multiregion/cluster-init-secure.yaml index 9197cb1eb26b..157d501f8990 100644 --- a/cloud/kubernetes/multiregion/cluster-init-secure.yaml +++ b/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -10,7 +10,7 @@ spec: serviceAccountName: cockroachdb containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml index b4716a229c56..0255d653733a 100644 --- a/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -166,7 +166,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml index 642d2acdb457..7fe6c4e117d2 100644 --- a/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml +++ b/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -184,7 +184,7 @@ spec: name: cockroach-env containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml index 0f38d6ac29d5..f6d4b4b18cbe 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-insecure.yaml @@ -81,7 +81,7 @@ spec: hostNetwork: true containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. 
If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml index 51b9a19545c9..8d69a0a571e3 100644 --- a/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-daemonset-secure.yaml @@ -197,7 +197,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: If you configured taints to give CockroachDB exclusive access to nodes, feel free # to remove the requests and limits sections. If you didn't, you'll need to change these to diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml index 859bd3a0a8c9..f3a642e3f1d4 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -140,7 +140,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml index 2f8d2c2413d7..c0f03297a9ea 100644 --- a/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -231,7 +231,7 @@ spec: - name: cockroachdb # NOTE: Always use the most recent version of CockroachDB for the best # performance and reliability. - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent # TODO: Change these to appropriate values for the hardware that you're running. 
You can see # the resources that can be allocated on each of your Kubernetes nodes by running: diff --git a/cloud/kubernetes/v1.6/client-secure.yaml b/cloud/kubernetes/v1.6/client-secure.yaml index 425526df7a87..e5a111f308cb 100644 --- a/cloud/kubernetes/v1.6/client-secure.yaml +++ b/cloud/kubernetes/v1.6/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init-secure.yaml b/cloud/kubernetes/v1.6/cluster-init-secure.yaml index 2abf463efb37..3141cb1e5d70 100644 --- a/cloud/kubernetes/v1.6/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.6/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.6/cluster-init.yaml b/cloud/kubernetes/v1.6/cluster-init.yaml index 14b70f0f9fd9..d22614e27e1b 100644 --- a/cloud/kubernetes/v1.6/cluster-init.yaml +++ b/cloud/kubernetes/v1.6/cluster-init.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml index 0ad368bec88d..499407231cc1 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset-secure.yaml @@ -177,7 +177,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml index cb5b699d69d4..df03bd93cfca 100644 --- a/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.6/cockroachdb-statefulset.yaml @@ -80,7 +80,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/client-secure.yaml b/cloud/kubernetes/v1.7/client-secure.yaml index 425526df7a87..e5a111f308cb 100644 --- a/cloud/kubernetes/v1.7/client-secure.yaml +++ b/cloud/kubernetes/v1.7/client-secure.yaml @@ -31,7 +31,7 @@ spec: mountPath: /cockroach-certs containers: - name: cockroachdb-client - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init-secure.yaml b/cloud/kubernetes/v1.7/cluster-init-secure.yaml index 2abf463efb37..3141cb1e5d70 100644 --- a/cloud/kubernetes/v1.7/cluster-init-secure.yaml +++ b/cloud/kubernetes/v1.7/cluster-init-secure.yaml @@ -33,7 +33,7 @@ spec: mountPath: /cockroach-certs containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent volumeMounts: - name: client-certs diff --git a/cloud/kubernetes/v1.7/cluster-init.yaml b/cloud/kubernetes/v1.7/cluster-init.yaml index 
14b70f0f9fd9..d22614e27e1b 100644 --- a/cloud/kubernetes/v1.7/cluster-init.yaml +++ b/cloud/kubernetes/v1.7/cluster-init.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: cluster-init - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent command: - "/cockroach/cockroach" diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml index 4e4484c77b64..3e756c516c83 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset-secure.yaml @@ -189,7 +189,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml index fd4a0c2ff549..67693722bbdb 100644 --- a/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml +++ b/cloud/kubernetes/v1.7/cockroachdb-statefulset.yaml @@ -92,7 +92,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: cockroachdb - image: cockroachdb/cockroach:v21.2.3 + image: cockroachdb/cockroach:v21.2.4 imagePullPolicy: IfNotPresent ports: - containerPort: 26257 diff --git a/dev b/dev index 0311e02b2ca6..9e12f8942f26 100755 --- a/dev +++ b/dev @@ -1,19 +1,21 @@ #!/usr/bin/env bash -set -uo pipefail +set -euo pipefail # Bump this counter to force rebuilding `dev` on all machines. -DEV_VERSION=2 +DEV_VERSION=4 THIS_DIR=$(cd "$(dirname "$0")" && pwd) BINARY_DIR=$THIS_DIR/bin/dev-versions BINARY_PATH=$BINARY_DIR/dev.$DEV_VERSION -if [ ! -f "$BINARY_PATH" ]; then +if [[ ! -f "$BINARY_PATH" || ! -z "${DEV_FORCE_REBUILD-}" ]]; then echo "$BINARY_PATH not found, building..." mkdir -p $BINARY_DIR bazel build //pkg/cmd/dev --config nonogo cp $(bazel info bazel-bin --config nonogo)/pkg/cmd/dev/dev_/dev $BINARY_PATH + # The Bazel-built binary won't have write permissions. + chmod a+w $BINARY_PATH fi exec $BINARY_PATH "$@" diff --git a/docs/RFCS/20211203_session_revival_token.md b/docs/RFCS/20211203_session_revival_token.md new file mode 100644 index 000000000000..80a9356d2e0a --- /dev/null +++ b/docs/RFCS/20211203_session_revival_token.md @@ -0,0 +1,168 @@ +- Feature Name: Token-Based Authentication for SQL Session Revival +- Status: draft +- Start Date: 2021-12-03 +- Authors: Rafi Shamim +- RFC PR: https://github.com/cockroachdb/cockroach/pull/74640 +- Cockroach Issue: https://github.com/cockroachdb/cockroach/issues/74643 + + +## Summary + +We are introducing a new way to authenticate and create a SQL session. This is intended to be used by SQL Proxy in order to re-authenticate a previously-established session (technically, authenticate the SQL Proxy itself) as part of the session migration project of CockroachDB Serverless. The new authentication mechanism is inspired by [JSON Web Tokens (JWTs)](https://datatracker.ietf.org/doc/html/rfc7519), so SQL Proxy will be able to ask for a token from a SQL node, and then later use it to start a new session for the user described in that token. This will allow sessions to be moved without major disruption [if a SQL node is shutting down](https://cockroachlabs.atlassian.net/browse/CC-5387) or if load can be shifted [from busy SQL nodes to lightly loaded nodes](https://cockroachlabs.atlassian.net/browse/CC-5385). 
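To make the intended flow concrete, here is a rough proxy-side sketch. It is illustrative only: the `crdb_internal.create_session_token` builtin and the `session_revival_token` startup parameter are taken from the design below, while the helper name, the use of pgx, and the hex encoding of the token bytes are assumptions made for the example.

```go
package proxysketch

import (
	"context"
	"encoding/hex"

	"github.com/jackc/pgx/v4"
)

// reviveSession sketches the proxy-side flow: obtain a session revival token
// from the SQL node that is draining, then use it instead of a password when
// opening a session for the same user on another node. newNode is assumed to
// have been created via pgx.ParseConfig.
func reviveSession(ctx context.Context, oldConn *pgx.Conn, newNode *pgx.ConnConfig) (*pgx.Conn, error) {
	// 1. Ask the old SQL node for a signed token for the current session's user.
	var token []byte
	if err := oldConn.QueryRow(ctx, "SELECT crdb_internal.create_session_token()").Scan(&token); err != nil {
		return nil, err
	}
	// 2. Present the token as a CockroachDB-specific startup parameter when
	//    connecting to the new SQL node. Hex-encoding the raw bytes into the
	//    string parameter is an assumption for this sketch.
	newNode.RuntimeParams["session_revival_token"] = hex.EncodeToString(token)
	return pgx.ConnectConfig(ctx, newNode)
}
```

A real implementation would follow this with `crdb_internal.deserialize_session` to restore the saved session state; this RFC deliberately keeps that step separate from authentication.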
+ + +## Motivation + +There are two motivations for the new authentication mechanism. For CockroachDB Serverless, we want the following: + + + +* Gracefully transfer sessions to another SQL node whenever a node is shutting down. Currently, when a SQL node is scaled down, existing connections to that node will be terminated forcefully, which is a poor user experience. We want to transfer these sessions to a live SQL node so that from the user’s perspective, the connection remains usable without interruption. +* Connection load balancing between SQL nodes. Currently, when there is an increase in load, more SQL nodes are created. However, sessions that caused the increase in load will still be connected to the old nodes. We want to be able to transfer sessions from one SQL node to another for better resource utilization. + +These use cases are addressed by “session migration.” In v21.2, the groundwork was laid for session migration with the addition of the builtin functions `crdb_internal.serialize_session` and `crdb_internal.deserialize_session` ([#68792](https://github.com/cockroachdb/cockroach/pull/68792)). These functions, respectively, save the state of the current session, and restore the saved state into the current session. The challenge is that a session can only be restored if the user associated with the current session matches the user defined in the saved session state. This means that in order to restore a session, SQL Proxy must first create a new session for that user, and this new session must be created without any input from the user. + +The focus of this RFC is to revive a session that had been authenticated for a SQL client previously. Therefore, the goal here is to authenticate the SQL Proxy as operating on behalf of the client, rather than as the client. This is a more narrow scope than the general problem of token authentication for SQL client apps. We do not exclude that this RFC will inform a solution to the more general problem but we wish to not let this RFC be burdened by it. + + +### Non-goals + + + +* An authentication flow that can be leveraged to implement Oauth2 authentication in the future. + +The authors acknowledge that there are various customer requests for authentication-related features that are conceptually related to the change proposed here. However, we are not considering them in-scope here and we expect that the user-facing services will be implemented in other ways. + + +## How would we explain this to a person outside your team? + +The change proposed here makes it possible to move a SQL session from one SQL node to another securely in the CRDB-Serverless architecture. What this means is that it empowers the "SQL Proxy" component to orchestrate SQL session migration, without introducing security weaknesses for malicious users outside of the CC architecture. + +In particular, it makes it possible for the SQL proxy to respawn a SQL session on a new server for an existing SQL user account, without knowing the access credentials of that user, as long as that user had a valid session already open previously on another server. + +The design is such that it remains impossible for malicious users, or even a bug in the SQL Proxy, to spawn a valid SQL session for an arbitrary user without knowing that user's credentials. + +Note that the purpose of this design is NOT to create a new authentication method visible to end-user apps. In other words: + + + +* We do not intend to document this as a feature usable by end-users. 
+* The mechanisms implemented here will not have cross-version compatibility guarantees for the benefit of app developers. + + +## Technical Design + +The proposed solution to this is to allow SQL Proxy to obtain a token from the SQL node for each open SQL session before that node shuts down. This token can then be presented later, during the establishment of a connection to a different SQL node, in order to initialize a session for the same user. The token is cryptographically signed with a ed25519 signing cert, so the new SQL node can verify the signature and trust that the token originated from some other SQL node. The technical changes are in the following places. + + +#### Add a new builtin: `crdb_internal.create_session_token` + +This function has a return value of type BYTES and returns the SessionToken proto serialized to bytes. + +The SessionToken itself is a proto defined as follows. + + +``` +message SessionToken { + // Payload is defined as a separate type so that it's easier to sign. + message Payload { + // The SQL user who can use this token to authenticate. + string user = 1; + // The algorithm used to sign the payload. Can be either Ed25519 or RSA. + string algorithm = 2 + // The time that this token is no longer considered valid. + // Default is 10 minutes from now. + google.protobuf.Timestamp expires_at = 3; + // The time that this token was created. + google.protobuf.Timestamp issued_at = 4; + } + + Payload payload = 1; + // The signature of the payload, signed using the new signing key. + bytes signature = 2; +} +``` + + +One may notice that the structure of the token is similar to a [JSON Web Token](https://datatracker.ietf.org/doc/html/rfc7519#section-3.1), with some differences. For example, we use protocol buffers so that new fields can easily be added and remain backwards compatible, and we do not base64-encode the result. + + +#### Update pgwire code to look out for the new token + +We will add a CockroachDB-specific “StartupMessage” in the [pgwire/server.go](https://github.com/cockroachdb/cockroach/blob/b4ab627436afd8d9ed23330cd4026aa435b5b65d/pkg/sql/pgwire/server.go#L731) code. We’ll name it **session_revival_token**. + +Before reaching the password authentication code, the server will see the token, and if it’s valid, will bypass the regular session initialization and [authentication code](https://github.com/cockroachdb/cockroach/blob/86383ca90f7e0604773496c03e35850d9089b394/pkg/sql/pgwire/conn.go#L645). + +To validate the token, the bytes are unmarshalled into a SessionToken struct. The SessionToken is considered valid if: (1) the signature in the token is the correct signature for the payload (verified by using the public key of the signing cert), (2) the user in the payload matches the user field in the session parameters, and (3) the expires_at time has not yet passed. If the token is valid, then the authentication continues as if a correct password were presented. These three checks are because: (1) the signature prevents forgery of a token, (2) checking the user matches is a light bit of defensive programming to make sure SQL Proxy uses the correct token for a user, and (3) enforcing an expiration time prevents the creation of a token by brute-force. + + +#### Deploy new signing key + + +In order to sign the token, we will introduce a new signing key (either [Ed25519](https://pkg.go.dev/crypto/ed25519) or RSA depending on what the Intrusion team decides) that is unique per tenant, and is shared by all SQL nodes of the same tenant. 
The advantage of Ed25519 is that it is fast and generates small signatures. The signing key will be generated by the Intrusion team. Unlike the existing tenant-client cert, this new signing cert doesn't require any DNS or IP information baked into the certificate and the certificate can be self-signed. This makes it easier to rotate if a tenant pod becomes compromised. When creating a tenant SQL node, Intrusion will need to put the key into the `certs-dir` of the SQL node (similar to how the tenant certs are added now). The cert will be named `tenant-signing..key`. The `crdb_internal.create_session_token` function will fail if this key is not present, and the token creation will be able to detect the algorithm used by the cert and call the according signature methods. A `cockroach mt cert create-tenant-signing` command will be added for testing purposes. + + +#### Update SQL Proxy to block the new StartupMessage + +It should remain impossible for external client apps to revive SQL sessions using this new protocol. In other words, this feature should be restricted for use by SQL Proxy. + +To achieve this, we will change the SQL proxy code to block the new status parameter if it is provided by a SQL client app when establishing a new connection. + +If we made this particular design user-facing, a malicious user could take one token for a valid session then flood our infrastructure with thousands of sessions opened with that token in a very short time. We have designed passwords and TLS authn to ensure this flood is impossible and it is important to not re-introduce it inadvertently by making this new mechanism user-facing. + + +#### Configuration in the sqlServer + +Since the mechanism should only be available in CRDB-Serverless deployments, we will make it configurable so that it is not enabled by default on dedicated or self-hosted clusters. A new field will be added to `sqlServerArgs` named **sessionRevivalTokenEnabled** which defaults to false, and it will control if the SQL node looks for the session_revival_token parameter. The field will be set to true only in the server.StartTenant function, so the functionality only is enabled in multitenant clusters. + + +### Drawbacks + +One drawback of this design is that it requires the token to be passed in as a CockroachDB-specific StartupMessage. This means that 3rd party drivers won’t be able to use this flow unless we fork them. Currently, that is not a use case we need to support, but if it does become one later, we can add in other ways for the SQL server to receive the token. + +Another drawback is that it needs to handle the case where the signing key is rotated or revoked. In the case of rotation, we can introduce a grace period so that multiple signing keys are present for a 10-minute grace period. Every signature verification attempt needs to first try the new key, and then try the old key if that fails. If the signing key is **revoked**, then all tokens signed with that key will become invalid. + + +### Rationale and Alternatives + +**Separate pgwire message type** + +An alternative design is to introduce an entirely new message type in our pgwire protocol implementation that contains the token. However, this is a more fundamental divergence from the standard protocol. This would be a risky change, since pgwire messages only use a single byte for the message type identifier. It would be quite possible for a future version of Postgres to introduce a new message type that clashes with our custom message type. 
+ +**Coupling session deserialization to token-based authentication** + +During the discussion of the design, the question arose if the authentication token should also include the serialized session state from `crdb_internal.serialize_session()`. Then, the only way to authenticate with the token would also require that a previous session be restored. This RFC proposes to **decouple** authentication from session restoration. The reason is that we want to use smaller primitives which can later be used as building blocks for other features. For example, allowing SQL Proxy to make a new session might help with a possible future use case that adds connection pooling to SQL Proxy. + +**Setup an external auth broker** + +If we were to build Oauth2 support into CRDB, we would be able to leverage an external auth service. + +The downsides to this option would be that our internal teams would need to configure and maintain an external auth system, as well as building support for dynamically updating the users database for that auth system. + +**Make SQL Proxy generate the token** + +We could add the new signing cert to SQL Proxy instead, and have it be in charge of making the token. Essentially, this would change the proposal to something more like “allow a user to login just because SQL Proxy says it can.” This would mean that the SQL node needs to trust that SQL Proxy would only do this if the user has already authenticated in some other way before. The tokens could not be created outside of the CRDB-Serverless product (e.g. in self-hosted clusters.) However, it also means that a compromised SQL Proxy instance could be used to generate any number of tokens, and we would have no way to audit the impact of such an attack. Because of these risks, we aren’t implementing this solution. + + +### Possible future extensions + +**Long-lived tokens** + +Currently, we only require short-lived tokens. If in the future, we need to have longer-lived tokens, the SQL Proxy could be updated to append additional expiry periods to the token (and sign them), in such a way that would be trusted by the SQL node when the session is resumed. + +**One-time-use tokens** + +In this design, a token can be used any number of times to authenticate a new session. It might be useful as a form of defensive programming to only allow each token to be used once, so that SQL Proxy doesn’t accidentally misuse them. If we decide later that this would be useful, the token could be updated to include a session identifier, and the token validation logic could verify that there are no active sessions with that ID. This would increase the latency of validating a session. + +**Combine session token and deserialize_session** + +As described in the motivation, sessions that are created using this token, will then use the deserialize_session builtin to restore state from a previously serialized session. This RFC keeps these two steps separate, but in the future, we may want to consolidate them if that leads to a reduction in code complexity. + +**Add a tenant read-only cluster setting for opting into session revival** + +Once the [multitenant cluster settings proposal](https://github.com/cockroachdb/cockroach/pull/73349) is completed, we should add a tenant read-only cluster setting that allows users to opt into this new behavior. This will be needed once the multitenant architecture is rolled out to self-hosted/dedicated clusters so that the new authentication mechanism is not enabled on these clusters unintentionally. 
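To tie the validation rules from the "Update pgwire code to look out for the new token" section together, here is a minimal sketch of the three checks. The checks themselves (signature, user match, expiration) come from the design above; the Go types, field names, and function signature are assumptions made for illustration, and the sketch assumes Ed25519 rather than RSA for brevity.

```go
package authsketch

import (
	"crypto/ed25519"
	"errors"
	"time"
)

// sessionToken loosely mirrors the SessionToken proto sketch above; the field
// names here are assumptions for illustration only.
type sessionToken struct {
	payloadBytes []byte    // serialized Payload; these are the signed bytes
	user         string    // Payload.user
	expiresAt    time.Time // Payload.expires_at
	signature    []byte
}

// validateSessionToken applies the three checks described in the design:
// (1) the signature verifies against the tenant signing key,
// (2) the token's user matches the user in the session parameters,
// (3) the token has not expired.
func validateSessionToken(
	tok sessionToken, signingKey ed25519.PublicKey, sessionUser string, now time.Time,
) error {
	if !ed25519.Verify(signingKey, tok.payloadBytes, tok.signature) {
		return errors.New("invalid session revival token signature")
	}
	if tok.user != sessionUser {
		return errors.New("session revival token was issued for a different user")
	}
	if now.After(tok.expiresAt) {
		return errors.New("session revival token has expired")
	}
	return nil
}
```

During a signing-key rotation, the same checks would simply be attempted against the new key first and then the old key, as described in the Drawbacks section.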
+ + +## Unresolved questions + +* N/A diff --git a/docs/generated/http/BUILD.bazel b/docs/generated/http/BUILD.bazel index 4033bf4ca531..266c86d6a6f9 100644 --- a/docs/generated/http/BUILD.bazel +++ b/docs/generated/http/BUILD.bazel @@ -27,6 +27,7 @@ genrule( "//pkg/server/diagnostics/diagnosticspb:diagnosticspb_proto", "//pkg/server/serverpb:serverpb_proto", "//pkg/server/status/statuspb:statuspb_proto", + "//pkg/sql/catalog/catpb:catpb_proto", "//pkg/sql/catalog/descpb:descpb_proto", "//pkg/sql/schemachanger/scpb:scpb_proto", "//pkg/sql/types:types_proto", diff --git a/docs/generated/redact_safe.md b/docs/generated/redact_safe.md index a26f778b151a..a4116f54802d 100644 --- a/docs/generated/redact_safe.md +++ b/docs/generated/redact_safe.md @@ -24,20 +24,19 @@ pkg/roachpb/metadata.go | `StoreID` pkg/roachpb/method.go | `Method` pkg/roachpb/tenant.go | `TenantID` pkg/rpc/connection_class.go | `ConnectionClass` -pkg/sql/catalog/descpb/structured.go | `ColumnID` +pkg/sql/catalog/catpb/constraint.go | `ForeignKeyAction` pkg/sql/catalog/descpb/structured.go | `ConstraintType` pkg/sql/catalog/descpb/structured.go | `ConstraintValidity` pkg/sql/catalog/descpb/structured.go | `DescriptorMutation_Direction` pkg/sql/catalog/descpb/structured.go | `DescriptorMutation_State` pkg/sql/catalog/descpb/structured.go | `DescriptorState` pkg/sql/catalog/descpb/structured.go | `DescriptorVersion` -pkg/sql/catalog/descpb/structured.go | `FamilyID` -pkg/sql/catalog/descpb/structured.go | `ID` pkg/sql/catalog/descpb/structured.go | `IndexDescriptorVersion` -pkg/sql/catalog/descpb/structured.go | `IndexID` pkg/sql/catalog/descpb/structured.go | `MutationID` -pkg/sql/sem/tree/table_ref.go | `ColumnID` -pkg/sql/sem/tree/table_ref.go | `ID` +pkg/sql/sem/catid/ids.go | `ColumnID` +pkg/sql/sem/catid/ids.go | `DescID` +pkg/sql/sem/catid/ids.go | `FamilyID` +pkg/sql/sem/catid/ids.go | `IndexID` pkg/sql/sqlliveness/sqlliveness.go | `SessionID` pkg/storage/enginepb/mvcc.go | `TxnEpoch` pkg/storage/enginepb/mvcc.go | `TxnSeq` diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt index d234018b5715..80c3d3353ff2 100644 --- a/docs/generated/settings/settings-for-tenants.txt +++ b/docs/generated/settings/settings-for-tenants.txt @@ -68,6 +68,8 @@ server.shutdown.drain_wait duration 0s the amount of time a server waits in an u server.shutdown.lease_transfer_wait duration 5s the amount of time a server waits to transfer range leases before proceeding with the rest of the shutdown process (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting) server.shutdown.query_wait duration 10s the server will wait for at least this amount of time for active queries to finish (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting) server.time_until_store_dead duration 5m0s the time after which if there is no new gossiped information about a store, it is considered dead +server.user_login.min_password_length integer 1 the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. 
+server.user_login.password_hashes.default_cost.crdb_bcrypt integer 10 the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31) server.user_login.store_client_pre_hashed_passwords.enabled boolean true whether the server accepts to store passwords pre-hashed by clients server.user_login.timeout duration 10s timeout after which client authentication times out if some system range is unavailable (0 = no timeout) server.web_session.auto_logout.timeout duration 168h0m0s the duration that web sessions will survive before being periodically purged, since they were last used diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html index 24bd4ec3fa4d..c000c4280f56 100644 --- a/docs/generated/settings/settings.html +++ b/docs/generated/settings/settings.html @@ -40,6 +40,7 @@ kv.range_split.by_load_enabledbooleantrueallow automatic splits of ranges based on where load is concentrated kv.range_split.load_qps_thresholdinteger2500the QPS over which, the range becomes a candidate for load based splitting kv.rangefeed.enabledbooleanfalseif set, rangefeed registration is enabled +kv.replica_circuit_breaker.slow_replication_thresholdduration0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers) kv.replication_reports.intervalduration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable) kv.snapshot_rebalance.max_ratebyte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshots kv.snapshot_recovery.max_ratebyte size32 MiBthe rate limit (bytes/sec) to use for recovery snapshots @@ -74,6 +75,8 @@ server.shutdown.lease_transfer_waitduration5sthe amount of time a server waits to transfer range leases before proceeding with the rest of the shutdown process (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting) server.shutdown.query_waitduration10sthe server will wait for at least this amount of time for active queries to finish (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting) server.time_until_store_deadduration5m0sthe time after which if there is no new gossiped information about a store, it is considered dead +server.user_login.min_password_lengthinteger1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. 
+server.user_login.password_hashes.default_cost.crdb_bcryptinteger10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31) server.user_login.store_client_pre_hashed_passwords.enabledbooleantruewhether the server accepts to store passwords pre-hashed by clients server.user_login.timeoutduration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout) server.web_session.auto_logout.timeoutduration168h0m0sthe duration that web sessions will survive before being periodically purged, since they were last used diff --git a/docs/generated/sql/bnf/create_view_stmt.bnf b/docs/generated/sql/bnf/create_view_stmt.bnf index 682e368d72e4..472838a995f6 100644 --- a/docs/generated/sql/bnf/create_view_stmt.bnf +++ b/docs/generated/sql/bnf/create_view_stmt.bnf @@ -5,7 +5,7 @@ create_view_stmt ::= | 'CREATE' 'OR' 'REPLACE' opt_temp 'VIEW' view_name 'AS' select_stmt | 'CREATE' opt_temp 'VIEW' 'IF' 'NOT' 'EXISTS' view_name '(' name_list ')' 'AS' select_stmt | 'CREATE' opt_temp 'VIEW' 'IF' 'NOT' 'EXISTS' view_name 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' view_name '(' name_list ')' 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' view_name 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name '(' name_list ')' 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name 'AS' select_stmt + | 'CREATE' 'MATERIALIZED' 'VIEW' view_name '(' name_list ')' 'AS' select_stmt opt_with_data + | 'CREATE' 'MATERIALIZED' 'VIEW' view_name 'AS' select_stmt opt_with_data + | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name '(' name_list ')' 'AS' select_stmt opt_with_data + | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name 'AS' select_stmt opt_with_data diff --git a/docs/generated/sql/bnf/on_conflict.bnf b/docs/generated/sql/bnf/on_conflict.bnf index 36773d334c82..b9ef21fe3c97 100644 --- a/docs/generated/sql/bnf/on_conflict.bnf +++ b/docs/generated/sql/bnf/on_conflict.bnf @@ -2,3 +2,5 @@ on_conflict ::= 'ON' 'CONFLICT' 'DO' 'NOTHING' | 'ON' 'CONFLICT' '(' ( ( name ) ( ( ',' name ) )* ) ')' 'DO' 'NOTHING' | 'ON' 'CONFLICT' '(' ( ( name ) ( ( ',' name ) )* ) ')' 'DO' 'UPDATE' 'SET' ( ( ( ( column_name '=' a_expr ) | ( '(' ( ( ( column_name ) ) ( ( ',' ( column_name ) ) )* ) ')' '=' ( '(' select_stmt ')' | ( '(' ')' | '(' ( a_expr | a_expr ',' | a_expr ',' ( ( a_expr ) ( ( ',' a_expr ) )* ) ) ')' ) ) ) ) ) ( ( ',' ( ( column_name '=' a_expr ) | ( '(' ( ( ( column_name ) ) ( ( ',' ( column_name ) ) )* ) ')' '=' ( '(' select_stmt ')' | ( '(' ')' | '(' ( a_expr | a_expr ',' | a_expr ',' ( ( a_expr ) ( ( ',' a_expr ) )* ) ) ')' ) ) ) ) ) )* ) + | 'ON' 'CONFLICT' 'ON' 'CONSTRAINT' constraint_name 'DO' 'NOTHING' + | 'ON' 'CONFLICT' 'ON' 'CONSTRAINT' constraint_name 'DO' 'UPDATE' 'SET' ( ( ( ( column_name '=' a_expr ) | ( '(' ( ( ( column_name ) ) ( ( ',' ( column_name ) ) )* ) ')' '=' ( '(' select_stmt ')' | ( '(' ')' | '(' ( a_expr | a_expr ',' | a_expr ',' ( ( a_expr ) ( ( ',' a_expr ) )* ) ) ')' ) ) ) ) ) ( ( ',' ( ( column_name '=' a_expr ) | ( '(' ( ( ( column_name ) ) ( ( ',' ( column_name ) ) )* ) ')' '=' ( '(' select_stmt ')' | ( '(' ')' | '(' ( a_expr | a_expr ',' | a_expr ',' ( ( a_expr ) ( ( ',' a_expr ) )* ) ) ')' ) ) ) ) ) )* ) diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index 3dbc25a06120..e5dd5f0c6f4d 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ 
b/docs/generated/sql/bnf/stmt_block.bnf @@ -559,6 +559,8 @@ on_conflict ::= 'ON' 'CONFLICT' 'DO' 'NOTHING' | 'ON' 'CONFLICT' '(' name_list ')' opt_where_clause 'DO' 'NOTHING' | 'ON' 'CONFLICT' '(' name_list ')' opt_where_clause 'DO' 'UPDATE' 'SET' set_clause_list opt_where_clause + | 'ON' 'CONFLICT' 'ON' 'CONSTRAINT' constraint_name 'DO' 'NOTHING' + | 'ON' 'CONFLICT' 'ON' 'CONSTRAINT' constraint_name 'DO' 'UPDATE' 'SET' set_clause_list opt_where_clause pause_jobs_stmt ::= 'PAUSE' 'JOB' a_expr @@ -1040,6 +1042,7 @@ unreserved_keyword ::= | 'NOMODIFYCLUSTERSETTING' | 'NONVOTERS' | 'NOVIEWACTIVITY' + | 'NOVIEWACTIVITYREDACTED' | 'NOWAIT' | 'NULLS' | 'IGNORE_FOREIGN_KEYS' @@ -1198,6 +1201,7 @@ unreserved_keyword ::= | 'VARYING' | 'VIEW' | 'VIEWACTIVITY' + | 'VIEWACTIVITYREDACTED' | 'VISIBLE' | 'VOTERS' | 'WITHIN' @@ -1440,8 +1444,8 @@ create_view_stmt ::= 'CREATE' opt_temp 'VIEW' view_name opt_column_list 'AS' select_stmt | 'CREATE' 'OR' 'REPLACE' opt_temp 'VIEW' view_name opt_column_list 'AS' select_stmt | 'CREATE' opt_temp 'VIEW' 'IF' 'NOT' 'EXISTS' view_name opt_column_list 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' view_name opt_column_list 'AS' select_stmt - | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name opt_column_list 'AS' select_stmt + | 'CREATE' 'MATERIALIZED' 'VIEW' view_name opt_column_list 'AS' select_stmt opt_with_data + | 'CREATE' 'MATERIALIZED' 'VIEW' 'IF' 'NOT' 'EXISTS' view_name opt_column_list 'AS' select_stmt opt_with_data create_sequence_stmt ::= 'CREATE' opt_temp 'SEQUENCE' sequence_name opt_sequence_option_list @@ -2025,6 +2029,10 @@ opt_temp ::= | 'TEMP' | +opt_with_data ::= + 'WITH' 'DATA' + | + sequence_name ::= db_object_name @@ -2358,6 +2366,8 @@ role_option ::= | 'NOCREATELOGIN' | 'VIEWACTIVITY' | 'NOVIEWACTIVITY' + | 'VIEWACTIVITYREDACTED' + | 'NOVIEWACTIVITYREDACTED' | 'CANCELQUERY' | 'NOCANCELQUERY' | 'MODIFYCLUSTERSETTING' diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md index 0ff0092e850b..9f87f05199b5 100644 --- a/docs/generated/sql/functions.md +++ b/docs/generated/sql/functions.md @@ -2534,7 +2534,9 @@ The swap_ordinate_string parameter is a 2-character string naming the ordinates crdb_internal.complete_stream_ingestion_job(job_id: int, cutover_ts: timestamptz) → int

This function can be used to signal a running stream ingestion job to complete. The job will eventually stop ingesting, revert to the specified timestamp and leave the cluster in a consistent state. The specified timestamp can only be specified up to the microsecond. This function does not wait for the job to reach a terminal state, but instead returns the job id as soon as it has signaled the job to complete. This builtin can be used in conjunction with SHOW JOBS WHEN COMPLETE to ensure that the job has left the cluster in a consistent state.

-crdb_internal.replication_stream_progress(stream_id: int, frontier_ts: string) → string

This function can be used on the consumer side to heartbeat its replication progress to a replication stream in the source cluster. It returns a StreamReplicationStatus message that indicates the stream status (RUNNING, PAUSED, or STOPPED).

+crdb_internal.replication_stream_progress(stream_id: int, frontier_ts: string) → bytes

This function can be used on the consumer side to heartbeat its replication progress to a replication stream in the source cluster. It returns a StreamReplicationStatus message that indicates the stream status (RUNNING, PAUSED, or STOPPED).

+
+crdb_internal.replication_stream_spec(stream_id: int) → bytes

This function can be used on the consumer side to get a replication stream specification for the specified stream starting from the specified ‘start_from’ timestamp. The consumer will later call ‘stream_partition’ with the spec on a partition to start streaming.

crdb_internal.start_replication_stream(tenant_id: int) → int

This function can be used on the producer side to start a replication stream for the specified tenant. The returned stream ID uniquely identifies the created stream. The caller must periodically invoke the crdb_internal.heartbeat_stream() function to notify that the replication is still ongoing.

@@ -2673,6 +2675,12 @@ The output can be used to recreate a database.’

parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
+prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

diff --git a/go.mod b/go.mod index ff19e57a02ab..fccee4a54c01 100644 --- a/go.mod +++ b/go.mod @@ -32,17 +32,17 @@ require ( github.com/bufbuild/buf v0.56.0 github.com/cenkalti/backoff v2.2.1+incompatible github.com/client9/misspell v0.3.4 - github.com/cockroachdb/apd/v2 v2.0.2 + github.com/cockroachdb/apd/v3 v3.0.0 github.com/cockroachdb/circuitbreaker v2.2.2-0.20190114160014-a614b14ccf63+incompatible github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 github.com/cockroachdb/cockroach-go/v2 v2.1.1 github.com/cockroachdb/crlfmt v0.0.0-20210128092314-b3eff0b87c79 github.com/cockroachdb/datadriven v1.0.1-0.20211007161720-b558070c3be0 - github.com/cockroachdb/errors v1.8.5 + github.com/cockroachdb/errors v1.8.6 github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55 github.com/cockroachdb/gostdlib v1.13.0 github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f - github.com/cockroachdb/pebble v0.0.0-20220107174839-c75a2e96a7e8 + github.com/cockroachdb/pebble v0.0.0-20220112164547-3d0ff924d13a github.com/cockroachdb/redact v1.1.3 github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 @@ -61,7 +61,8 @@ require ( github.com/elastic/gosigar v0.14.1 github.com/emicklei/dot v0.15.0 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a - github.com/fraugster/parquet-go v0.4.0 + github.com/fraugster/parquet-go v0.6.1 + github.com/fsnotify/fsnotify v1.5.1 github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 github.com/go-sql-driver/mysql v1.6.0 github.com/go-swagger/go-swagger v0.26.1 @@ -77,7 +78,6 @@ require ( github.com/google/go-github v17.0.0+incompatible github.com/google/pprof v0.0.0-20210827144239-02619b876842 github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed - github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/gorilla/mux v1.8.0 github.com/goware/modvendor v0.5.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -120,6 +120,7 @@ require ( github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8 github.com/pseudomuto/protoc-gen-doc v1.3.2 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/robfig/cron/v3 v3.0.1 github.com/sasha-s/go-deadlock v0.3.1 github.com/shirou/gopsutil/v3 v3.21.12 github.com/slack-go/slack v0.9.5 @@ -138,18 +139,18 @@ require ( go.opentelemetry.io/otel/exporters/zipkin v1.0.0-RC3 go.opentelemetry.io/otel/sdk v1.0.0-RC3 go.opentelemetry.io/otel/trace v1.0.0-RC3 - golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 - golang.org/x/exp v0.0.0-20210514180818-737f94c0881e + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/exp v0.0.0-20220104160115-025e73f80486 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/net v0.0.0-20210913180222-943fd674d43e + golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211210111614-af8b64212486 + golang.org/x/sys v0.0.0-20220111092808-5a964db01320 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.7 + golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 google.golang.org/api v0.63.0 google.golang.org/genproto v0.0.0-20211222154725-9823f7ba7562 google.golang.org/grpc v1.43.0 @@ -160,8 +161,6 @@ require ( 
vitess.io/vitess v0.0.0-00010101000000-000000000000 ) -require github.com/fsnotify/fsnotify v1.5.1 - require ( cloud.google.com/go v0.99.0 // indirect cloud.google.com/go/kms v1.1.0 // indirect @@ -174,7 +173,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/DataDog/zstd v1.4.8 // indirect + github.com/DataDog/zstd v1.5.0 // indirect github.com/Masterminds/goutils v1.1.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect @@ -183,7 +182,7 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 // indirect github.com/andybalholm/cascadia v1.2.0 // indirect - github.com/apache/thrift v0.13.0 // indirect + github.com/apache/thrift v0.15.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/aws/aws-sdk-go-v2/credentials v1.4.2 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 // indirect @@ -264,7 +263,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.11 // indirect - github.com/klauspost/compress v1.13.5 // indirect + github.com/klauspost/compress v1.14.1 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.5 // indirect @@ -319,7 +318,7 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/mod v0.5.1 // indirect + golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/grpc/examples v0.0.0-20210324172016-702608ffae4d // indirect diff --git a/go.sum b/go.sum index 5d630eac0571..f13d5cfa9d6f 100644 --- a/go.sum +++ b/go.sum @@ -148,8 +148,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.8 h1:Rpmta4xZ/MgZnriKNd24iZMhGpP5dvUcs/uqfBapKZY= -github.com/DataDog/zstd v1.4.8/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= @@ -246,8 +246,9 @@ github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01 h1:FSqtT0UCk github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.0.0-20151001171628-53dd39833a08/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= +github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -386,8 +387,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= -github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/apd/v3 v3.0.0 h1:7uckTv9DQFr2/kh9tTEc2PTrPr85n9T+MWX4p5pVS/Y= +github.com/cockroachdb/apd/v3 v3.0.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= github.com/cockroachdb/circuitbreaker v2.2.2-0.20190114160014-a614b14ccf63+incompatible h1:u3uQ4oAKM5g2eODBAsDdDSrTs7zRWXtvu+nvSDA9098= github.com/cockroachdb/circuitbreaker v2.2.2-0.20190114160014-a614b14ccf63+incompatible/go.mod h1:v3T8+rm/HmCL0D1BwDcGaHHAQDuFPW7EsnYs2nBRqUo= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 h1:dzj1/xcivGjNPwwifh/dWTczkwcuqsXXFHY1X/TZMtw= @@ -404,8 +405,8 @@ github.com/cockroachdb/datadriven v1.0.1-0.20211007161720-b558070c3be0/go.mod h1 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= -github.com/cockroachdb/errors v1.8.5 h1:+nC/m4InLWfTRZJqB9r7KdJpTGxA7kDn5FUzXzd02Qw= -github.com/cockroachdb/errors v1.8.5/go.mod h1:GV8u+kb+Pb23w4FneavC3BBo/8XbnWqcCGhHKi3foME= +github.com/cockroachdb/errors v1.8.6 h1:Am9evxl/po3RzpokemQvq7S7Cd0mxv24xy0B/trlQF4= +github.com/cockroachdb/errors v1.8.6/go.mod h1:hOm5fabihW+xEyY1kuypGwqT+Vt7rafg04ytBtIpeIQ= github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55 h1:YqzBA7tf8Gv8Oz0BbBsPenqkyjiohS7EUIwi7p1QJCU= github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55/go.mod h1:QqVqNIiRhLqJXif5C9wbM4JydBhrAF2WDMxkv5xkyxQ= github.com/cockroachdb/gostdlib v1.13.0 h1:TzSEPYgkKDNei3gbLc0rrHu4iHyBp7/+NxPOFmcXGaw= @@ -415,10 +416,10 @@ github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f h1:6jduT9Hfc0n github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/panicparse/v2 v2.0.0-20211103220158-604c82a44f1e h1:FrERdkPlRj+v7fc+PGpey3GUiDGuTR5CsmLCA54YJ8I= github.com/cockroachdb/panicparse/v2 v2.0.0-20211103220158-604c82a44f1e/go.mod h1:pMxsKyCewnV3xPaFvvT9NfwvDTcIx2Xqg0qL5Gq0SjM= -github.com/cockroachdb/pebble v0.0.0-20220107174839-c75a2e96a7e8 
h1:LnM9wPinzvGmAXZ/O8eOGXBaMESPVtMrOnmUcJpBgxI= -github.com/cockroachdb/pebble v0.0.0-20220107174839-c75a2e96a7e8/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= +github.com/cockroachdb/pebble v0.0.0-20220112164547-3d0ff924d13a h1:ZhbIEzddawjcYhWIiOLLePZawe1PT0VlIFVsJiVakB8= +github.com/cockroachdb/pebble v0.0.0-20220112164547-3d0ff924d13a/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/redact v1.1.0/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/redact v1.1.1/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd h1:KFOt5I9nEKZgCnOSmy8r4Oykh8BYQO8bFOTgHDS8YZA= @@ -684,8 +685,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fraugster/parquet-go v0.4.0 h1:1VjhmRJTlHR2vM3qXiPjsYbTYEtwIxmQZZ7AvVKAcQQ= -github.com/fraugster/parquet-go v0.4.0/go.mod h1:qIL8Wm6AK06QHCj9OBFW6PyS+7ukZxc20K/acSeGUas= +github.com/fraugster/parquet-go v0.6.1 h1:Kks9Ibly3ZZQPRPGkS1lVmrwndBp8PxamBnDFG5jvEM= +github.com/fraugster/parquet-go v0.6.1/go.mod h1:1HGhXzpHv7CULzknVNWIY0Ihn2O3qNbD1p+aQvHWhqo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -1064,8 +1065,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -1360,8 +1359,9 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds= +github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -1780,6 +1780,8 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -2008,7 +2010,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= @@ -2054,6 +2056,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC3 h1:pKXuRvOc+5NgM0vv05PVIUetreuM57mcC6QQAKkcqZA= @@ -2065,8 +2068,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC3 h1:F3 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC3/go.mod h1:9JCUOSptzVKaFbxGVO+Wa1P8fDpA5QGVTuIzL/PKSrk= go.opentelemetry.io/otel/exporters/zipkin v1.0.0-RC3 h1:NcjZQwPFW0V2OvasQQJWSOTGA6R+4SLLgcPGIiDC3fk= go.opentelemetry.io/otel/exporters/zipkin v1.0.0-RC3/go.mod h1:3ASq7NNNoDb2P6dIBEBuurzogYO3Jo+wfbeonxBc11Y= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod 
h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -2148,8 +2155,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2166,8 +2173,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20210514180818-737f94c0881e h1:VqVU3dsTLGDa5pW74b+xG1lvKltt4EZIUrFPeKOqV2s= -golang.org/x/exp v0.0.0-20210514180818-737f94c0881e/go.mod h1:MSdmUWF4ZWBPSUbgUX/gaau5kvnbkSs9pgtY6B9JXDE= +golang.org/x/exp v0.0.0-20220104160115-025e73f80486 h1:gpEOK9kxNqVPOaZayQV2bzetZplXWakHeirk1bXKu2s= +golang.org/x/exp v0.0.0-20220104160115-025e73f80486/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2198,8 +2205,9 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= 
+golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 h1:LQmS1nU0twXLA96Kt7U9qtHJEbBk3z6Q0V4UXjZkpr4= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2270,12 +2278,11 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210907225631-ff17edfbf26d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2430,7 +2437,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2442,9 +2448,11 @@ golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211015200801-69063c4bb744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320 h1:0jf+tOCoZ3LyutmCOWpVni1chK4VfFLhRsDK7MhqGRY= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2568,8 +2576,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 37d7b43282d1..79f6456bb331 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -118,6 +118,7 @@ ALL_TESTS = [ "//pkg/keys:keys_test", "//pkg/kv/bulk:bulk_test", "//pkg/kv/kvclient/kvcoord:kvcoord_test", + "//pkg/kv/kvclient/kvstreamer:kvstreamer_test", "//pkg/kv/kvclient/rangecache:rangecache_test", "//pkg/kv/kvclient/rangefeed/rangefeedbuffer:rangefeedbuffer_test", "//pkg/kv/kvclient/rangefeed:rangefeed_test", @@ -257,6 +258,7 @@ ALL_TESTS = [ "//pkg/sql/opt/bench:bench_test", "//pkg/sql/opt/cat:cat_test", "//pkg/sql/opt/constraint:constraint_test", + "//pkg/sql/opt/distribution:distribution_test", "//pkg/sql/opt/exec/execbuilder:execbuilder_test", "//pkg/sql/opt/exec/explain:explain_test", "//pkg/sql/opt/idxconstraint:idxconstraint_test", @@ -293,6 +295,8 @@ ALL_TESTS = [ "//pkg/sql/randgen:randgen_test", "//pkg/sql/row:row_test", "//pkg/sql/rowcontainer:rowcontainer_test", + "//pkg/sql/rowenc/keyside:keyside_test", + "//pkg/sql/rowenc/valueside:valueside_test", "//pkg/sql/rowenc:rowenc_test", "//pkg/sql/rowexec:rowexec_test", "//pkg/sql/rowflow:rowflow_test", @@ -302,9 +306,9 @@ ALL_TESTS = [ "//pkg/sql/schemachanger/scbuild:scbuild_test", "//pkg/sql/schemachanger/scdeps:scdeps_test", "//pkg/sql/schemachanger/scexec:scexec_test", - "//pkg/sql/schemachanger/scgraph:scgraph_test", - "//pkg/sql/schemachanger/scplan/deprules:deprules_test", - "//pkg/sql/schemachanger/scplan/opgen:opgen_test", + "//pkg/sql/schemachanger/scplan/internal/deprules:deprules_test", + "//pkg/sql/schemachanger/scplan/internal/opgen:opgen_test", + "//pkg/sql/schemachanger/scplan/internal/scgraph:scgraph_test", 
"//pkg/sql/schemachanger/scplan:scplan_test", "//pkg/sql/schemachanger/screl:screl_test", "//pkg/sql/schemachanger:schemachanger_test", diff --git a/pkg/bench/bench_test.go b/pkg/bench/bench_test.go index 74525c8caace..6d98c9a710fe 100644 --- a/pkg/bench/bench_test.go +++ b/pkg/bench/bench_test.go @@ -1152,9 +1152,9 @@ func BenchmarkIndexJoin(b *testing.B) { FAMILY "primary" (k, v, extra) ) ` - // We'll insert 1000 rows with random values below 1000 in the index. We'll - // then query the index with a query that retrieves all the data (but the - // optimizer doesn't know that). + // We'll insert 1000 rows with random values below 1000 in the index. + // We'll then force scanning of the secondary index which will require + // performing an index join to get 'extra' column. insert := "insert into tidx(k,v) select generate_series(1,1000), (random()*1000)::int" db.Exec(b, create) @@ -1162,7 +1162,7 @@ func BenchmarkIndexJoin(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - db.Exec(b, "select * from bench.tidx where v < 1000") + db.Exec(b, "select * from bench.tidx@idx where v < 1000") } }) } diff --git a/pkg/ccl/backupccl/BUILD.bazel b/pkg/ccl/backupccl/BUILD.bazel index 874d40c50c5f..511c1af1dda9 100644 --- a/pkg/ccl/backupccl/BUILD.bazel +++ b/pkg/ccl/backupccl/BUILD.bazel @@ -68,6 +68,7 @@ go_library( "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/dbdesc", @@ -80,7 +81,6 @@ go_library( "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", "//pkg/sql/catalog/typedesc", - "//pkg/sql/covering", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/parser", @@ -126,9 +126,9 @@ go_library( "@com_github_cockroachdb_logtags//:logtags", "@com_github_gogo_protobuf//jsonpb", "@com_github_gogo_protobuf//types", - "@com_github_gorhill_cronexpr//:cronexpr", "@com_github_kr_pretty//:pretty", "@com_github_lib_pq//oid", + "@com_github_robfig_cron_v3//:cron", "@com_github_stretchr_testify//require", ], ) @@ -147,7 +147,6 @@ go_test( "create_scheduled_backup_test.go", "full_cluster_backup_restore_test.go", "helpers_test.go", - "import_spans_test.go", "insert_missing_public_schema_namespace_entry_restore_test.go", "key_rewriter_test.go", "main_test.go", @@ -162,7 +161,7 @@ go_test( "split_and_scatter_processor_test.go", "system_schema_test.go", ], - data = glob(["testdata/**"]), + data = glob(["testdata/**"]) + ["@cockroach//c-deps:libgeos"], embed = [":backupccl"], deps = [ "//pkg/base", @@ -252,10 +251,10 @@ go_test( "@com_github_cockroachdb_pebble//vfs", "@com_github_gogo_protobuf//proto", "@com_github_gogo_protobuf//types", - "@com_github_gorhill_cronexpr//:cronexpr", "@com_github_jackc_pgx_v4//:pgx", "@com_github_kr_pretty//:pretty", "@com_github_lib_pq//:pq", + "@com_github_robfig_cron_v3//:cron", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_golang_x_sync//errgroup", diff --git a/pkg/ccl/backupccl/backup_destination.go b/pkg/ccl/backupccl/backup_destination.go index 72a5f6e72cfa..c1014b333f75 100644 --- a/pkg/ccl/backupccl/backup_destination.go +++ b/pkg/ccl/backupccl/backup_destination.go @@ -22,6 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" 
"github.com/cockroachdb/errors" ) @@ -30,29 +32,30 @@ import ( // chain. func fetchPreviousBackups( ctx context.Context, + mem *mon.BoundAccount, user security.SQLUsername, makeCloudStorage cloud.ExternalStorageFromURIFactory, prevBackupURIs []string, encryptionParams jobspb.BackupEncryptionOptions, kmsEnv cloud.KMSEnv, -) ([]BackupManifest, *jobspb.BackupEncryptionOptions, error) { +) ([]BackupManifest, *jobspb.BackupEncryptionOptions, int64, error) { if len(prevBackupURIs) == 0 { - return nil, nil, nil + return nil, nil, 0, nil } baseBackup := prevBackupURIs[0] encryptionOptions, err := getEncryptionFromBase(ctx, user, makeCloudStorage, baseBackup, encryptionParams, kmsEnv) if err != nil { - return nil, nil, err + return nil, nil, 0, err } - prevBackups, err := getBackupManifests(ctx, user, makeCloudStorage, prevBackupURIs, + prevBackups, size, err := getBackupManifests(ctx, mem, user, makeCloudStorage, prevBackupURIs, encryptionOptions) if err != nil { - return nil, nil, err + return nil, nil, 0, err } - return prevBackups, encryptionOptions, nil + return prevBackups, encryptionOptions, size, nil } // resolveDest resolves the true destination of a backup. The backup command @@ -188,20 +191,33 @@ func resolveDest( // getBackupManifests fetches the backup manifest from a list of backup URIs. func getBackupManifests( ctx context.Context, + mem *mon.BoundAccount, user security.SQLUsername, makeCloudStorage cloud.ExternalStorageFromURIFactory, backupURIs []string, encryption *jobspb.BackupEncryptionOptions, -) ([]BackupManifest, error) { +) ([]BackupManifest, int64, error) { manifests := make([]BackupManifest, len(backupURIs)) if len(backupURIs) == 0 { - return manifests, nil + return manifests, 0, nil } + memMu := struct { + syncutil.Mutex + total int64 + mem *mon.BoundAccount + }{} + memMu.mem = mem + g := ctxgroup.WithContext(ctx) for i := range backupURIs { i := i + // boundAccount isn't threadsafe so we'll make a new one this goroutine to + // pass while reading. When it is done, we'll lock an mu, reserve its size + // from the main one tracking the total amount reserved. + subMem := mem.Monitor().MakeBoundAccount() g.GoCtx(func(ctx context.Context) error { + defer subMem.Close(ctx) // TODO(lucy): We may want to upgrade the table descs to the newer // foreign key representation here, in case there are backups from an // older cluster. Keeping the descriptors as they are works for now @@ -209,22 +225,34 @@ func getBackupManifests( // but it will be safer for future code to avoid having older-style // descriptors around. uri := backupURIs[i] - desc, err := ReadBackupManifestFromURI( - ctx, uri, user, makeCloudStorage, encryption, + desc, size, err := ReadBackupManifestFromURI( + ctx, &subMem, uri, user, makeCloudStorage, encryption, ) if err != nil { return errors.Wrapf(err, "failed to read backup from %q", RedactURIForErrorMessage(uri)) } - manifests[i] = desc - return nil + + memMu.Lock() + err = memMu.mem.Grow(ctx, size) + + if err == nil { + memMu.total += size + manifests[i] = desc + } + subMem.Shrink(ctx, size) + memMu.Unlock() + + return err }) } + if err := g.Wait(); err != nil { - return nil, err + mem.Shrink(ctx, memMu.total) + return nil, 0, err } - return manifests, nil + return manifests, memMu.total, nil } // getEncryptionFromBase retrieves the encryption options of a base backup. 
It diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go index 0cfcc2474847..bf2e457d4104 100644 --- a/pkg/ccl/backupccl/backup_job.go +++ b/pkg/ccl/backupccl/backup_job.go @@ -32,13 +32,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" - "github.com/cockroachdb/cockroach/pkg/sql/covering" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" @@ -69,53 +69,13 @@ func countRows(raw roachpb.BulkOpSummary, pkIDs map[uint64]bool) RowCount { return res } -// coveringFromSpans creates an interval.Covering with a fixed payload from a -// slice of roachpb.Spans. -func coveringFromSpans(spans []roachpb.Span, payload interface{}) covering.Covering { - var c covering.Covering - for _, span := range spans { - c = append(c, covering.Range{ - Start: []byte(span.Key), - End: []byte(span.EndKey), - Payload: payload, - }) - } - return c -} - // filterSpans returns the spans that represent the set difference // (includes - excludes). func filterSpans(includes []roachpb.Span, excludes []roachpb.Span) []roachpb.Span { - type includeMarker struct{} - type excludeMarker struct{} - - includeCovering := coveringFromSpans(includes, includeMarker{}) - excludeCovering := coveringFromSpans(excludes, excludeMarker{}) - - splits := covering.OverlapCoveringMerge( - []covering.Covering{includeCovering, excludeCovering}, - ) - - var out []roachpb.Span - for _, split := range splits { - include := false - exclude := false - for _, payload := range split.Payload.([]interface{}) { - switch payload.(type) { - case includeMarker: - include = true - case excludeMarker: - exclude = true - } - } - if include && !exclude { - out = append(out, roachpb.Span{ - Key: roachpb.Key(split.Start), - EndKey: roachpb.Key(split.End), - }) - } - } - return out + var cov roachpb.SpanGroup + cov.Add(includes...) + cov.Sub(excludes...) + return cov.Slice() } // clusterNodeCount returns the approximate number of nodes in the cluster. @@ -461,10 +421,18 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { storageByLocalityKV[kv] = &conf } - backupManifest, err := b.readManifestOnResume(ctx, p.ExecCfg(), defaultStore, details) + mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount() + defer mem.Close(ctx) + + backupManifest, memSize, err := b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details) if err != nil { return err } + defer func() { + if memSize != 0 { + mem.Shrink(ctx, memSize) + } + }() statsCache := p.ExecCfg().TableStatsCache // We retry on pretty generic failures -- any rpc error. If a worker node were @@ -514,7 +482,9 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { // Reload the backup manifest to pick up any spans we may have completed on // previous attempts. 
var reloadBackupErr error - backupManifest, reloadBackupErr = b.readManifestOnResume(ctx, p.ExecCfg(), defaultStore, details) + mem.Shrink(ctx, memSize) + memSize = 0 + backupManifest, memSize, reloadBackupErr = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details) if reloadBackupErr != nil { return errors.Wrap(reloadBackupErr, "could not reload backup manifest when retrying") } @@ -631,26 +601,27 @@ func (b *backupResumer) ReportResults(ctx context.Context, resultsCh chan<- tree func (b *backupResumer) readManifestOnResume( ctx context.Context, + mem *mon.BoundAccount, cfg *sql.ExecutorConfig, defaultStore cloud.ExternalStorage, details jobspb.BackupDetails, -) (*BackupManifest, error) { +) (*BackupManifest, int64, error) { // We don't read the table descriptors from the backup descriptor, but // they could be using either the new or the old foreign key // representations. We should just preserve whatever representation the // table descriptors were using and leave them alone. - desc, err := readBackupManifest(ctx, defaultStore, backupManifestCheckpointName, + desc, memSize, err := readBackupManifest(ctx, mem, defaultStore, backupManifestCheckpointName, details.EncryptionOptions) if err != nil { if !errors.Is(err, cloud.ErrFileDoesNotExist) { - return nil, errors.Wrapf(err, "reading backup checkpoint") + return nil, 0, errors.Wrapf(err, "reading backup checkpoint") } // Try reading temp checkpoint. tmpCheckpoint := tempCheckpointFileNameForJob(b.job.ID()) - desc, err = readBackupManifest(ctx, defaultStore, tmpCheckpoint, details.EncryptionOptions) + desc, memSize, err = readBackupManifest(ctx, mem, defaultStore, tmpCheckpoint, details.EncryptionOptions) if err != nil { - return nil, err + return nil, 0, err } // "Rename" temp checkpoint. @@ -658,7 +629,8 @@ func (b *backupResumer) readManifestOnResume( ctx, cfg.Settings, defaultStore, backupManifestCheckpointName, details.EncryptionOptions, &desc, ); err != nil { - return nil, errors.Wrapf(err, "renaming temp checkpoint file") + mem.Shrink(ctx, memSize) + return nil, 0, errors.Wrapf(err, "renaming temp checkpoint file") } // Best effort remove temp checkpoint. 
if err := defaultStore.Delete(ctx, tmpCheckpoint); err != nil { @@ -667,10 +639,11 @@ func (b *backupResumer) readManifestOnResume( } if !desc.ClusterID.Equal(cfg.ClusterID()) { - return nil, errors.Newf("cannot resume backup started on another cluster (%s != %s)", + mem.Shrink(ctx, memSize) + return nil, 0, errors.Newf("cannot resume backup started on another cluster (%s != %s)", desc.ClusterID, cfg.ClusterID()) } - return &desc, nil + return &desc, memSize, nil } func (b *backupResumer) maybeNotifyScheduledJobCompletion( diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 194bc8c8a039..1a2b2f142f00 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -1469,11 +1469,18 @@ func getBackupDetailAndManifest( kmsEnv := &backupKMSEnv{settings: execCfg.Settings, conf: &execCfg.ExternalIODirConfig} - prevBackups, encryptionOptions, err := fetchPreviousBackups(ctx, user, makeCloudStorage, prevs, - *initialDetails.EncryptionOptions, kmsEnv) + mem := execCfg.RootMemoryMonitor.MakeBoundAccount() + defer mem.Close(ctx) + + prevBackups, encryptionOptions, memSize, err := fetchPreviousBackups(ctx, &mem, user, + makeCloudStorage, prevs, *initialDetails.EncryptionOptions, kmsEnv) if err != nil { return jobspb.BackupDetails{}, BackupManifest{}, err } + defer func() { + mem.Shrink(ctx, memSize) + }() + if len(prevBackups) > 0 { baseManifest := prevBackups[0] if baseManifest.DescriptorCoverage == tree.AllDescriptors && @@ -1592,10 +1599,7 @@ func getBackupDetailAndManifest( } } - var cov roachpb.SpanGroup - cov.Add(spans...) - cov.Sub(prevBackups[len(prevBackups)-1].Spans...) - newSpans = cov.Slice() + newSpans = filterSpans(spans, prevBackups[len(prevBackups)-1].Spans) tableSpans, err := getReintroducedSpans(ctx, execCfg, prevBackups, tables, revs, endTime) if err != nil { diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 417b54637978..79ef1abbec33 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -99,6 +99,11 @@ func init() { cloud.RegisterKMSFromURIFactory(MakeTestKMS, "testkms") } +func makeTableSpan(tableID uint32) roachpb.Span { + k := keys.SystemSQLCodec.TablePrefix(tableID) + return roachpb.Span{Key: k, EndKey: k.PrefixEnd()} +} + type sqlDBKey struct { server string user string @@ -1840,7 +1845,7 @@ func TestBackupRestoreResume(t *testing.T) { t.Fatal(err) } if isGZipped(backupManifestBytes) { - backupManifestBytes, err = decompressData(backupManifestBytes) + backupManifestBytes, err = decompressData(ctx, nil, backupManifestBytes) require.NoError(t, err) } var backupManifest BackupManifest @@ -4399,7 +4404,7 @@ func TestBackupRestoreChecksum(t *testing.T) { t.Fatalf("%+v", err) } if isGZipped(backupManifestBytes) { - backupManifestBytes, err = decompressData(backupManifestBytes) + backupManifestBytes, err = decompressData(context.Background(), nil, backupManifestBytes) require.NoError(t, err) } if err := protoutil.Unmarshal(backupManifestBytes, &backupManifest); err != nil { @@ -8095,7 +8100,7 @@ func TestManifestTooNew(t *testing.T) { manifestPath := filepath.Join(rawDir, "too_new", backupManifestName) manifestData, err := ioutil.ReadFile(manifestPath) require.NoError(t, err) - manifestData, err = decompressData(manifestData) + manifestData, err = decompressData(context.Background(), nil, manifestData) require.NoError(t, err) var backupManifest BackupManifest require.NoError(t, protoutil.Unmarshal(manifestData, &backupManifest)) @@ -8461,7 
+8466,7 @@ func TestBackupOnlyPublicIndexes(t *testing.T) { fullBackup := LocalFoo + "/full" sqlDB.Exec(t, `BACKUP DATABASE data TO $1 WITH revision_history`, fullBackup) - fullBackupSpans := getSpansFromManifest(t, locationToDir(fullBackup)) + fullBackupSpans := getSpansFromManifest(ctx, t, locationToDir(fullBackup)) require.Equal(t, 1, len(fullBackupSpans)) require.Equal(t, "/Table/56/{1-2}", fullBackupSpans[0].String()) @@ -8505,10 +8510,10 @@ func TestBackupOnlyPublicIndexes(t *testing.T) { // Wait for the backfill and incremental backup to complete. require.NoError(t, g.Wait()) - inc1Spans := getSpansFromManifest(t, locationToDir(inc1Loc)) + inc1Spans := getSpansFromManifest(ctx, t, locationToDir(inc1Loc)) require.Equalf(t, 0, len(inc1Spans), "expected inc1 to not have any data, found %v", inc1Spans) - inc2Spans := getSpansFromManifest(t, locationToDir(inc2Loc)) + inc2Spans := getSpansFromManifest(ctx, t, locationToDir(inc2Loc)) require.Equalf(t, 0, len(inc2Spans), "expected inc2 to not have any data, found %v", inc2Spans) // Take another incremental backup that should only contain the newly added @@ -8516,7 +8521,7 @@ func TestBackupOnlyPublicIndexes(t *testing.T) { inc3Loc := LocalFoo + "/inc3" sqlDB.Exec(t, `BACKUP DATABASE data TO $1 INCREMENTAL FROM $2, $3, $4 WITH revision_history`, inc3Loc, fullBackup, inc1Loc, inc2Loc) - inc3Spans := getSpansFromManifest(t, locationToDir(inc3Loc)) + inc3Spans := getSpansFromManifest(ctx, t, locationToDir(inc3Loc)) require.Equal(t, 1, len(inc3Spans)) require.Equal(t, "/Table/56/{2-3}", inc3Spans[0].String()) diff --git a/pkg/ccl/backupccl/bench_covering_test.go b/pkg/ccl/backupccl/bench_covering_test.go index d1bff8909204..baea944266dc 100644 --- a/pkg/ccl/backupccl/bench_covering_test.go +++ b/pkg/ccl/backupccl/bench_covering_test.go @@ -25,31 +25,15 @@ func BenchmarkCoverageChecks(b *testing.B) { b.Run(fmt.Sprintf("numSpans=%d", numSpans), func(b *testing.B) { for _, baseFiles := range []int{0, 10, 100, 1000, 10000} { b.Run(fmt.Sprintf("numFiles=%d", baseFiles), func(b *testing.B) { - b.StopTimer() ctx := context.Background() backups := MockBackupChain(numBackups, numSpans, baseFiles, r) b.ResetTimer() - b.Run("checkCoverage", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := checkCoverage(ctx, backups[numBackups-1].Spans, backups); err != nil { - b.Fatal(err) - } + for i := 0; i < b.N; i++ { + if err := checkCoverage(ctx, backups[numBackups-1].Spans, backups); err != nil { + b.Fatal(err) } - }) - b.Run("makeImportSpans", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, ts, err := makeImportSpans(backups[numBackups-1].Spans, backups, nil, nil, errOnMissingRange) - if err != nil { - b.Fatal(err) - } - if got, expected := ts, backups[len(backups)-1].EndTime; !got.Equal(expected) { - b.Fatal(expected, got) - } - } - }) + } }) } }) @@ -67,28 +51,16 @@ func BenchmarkRestoreEntryCover(b *testing.B) { b.Run(fmt.Sprintf("numFiles=%d", baseFiles), func(b *testing.B) { for _, numSpans := range []int{10, 100} { b.Run(fmt.Sprintf("numSpans=%d", numSpans), func(b *testing.B) { - b.StopTimer() ctx := context.Background() backups := MockBackupChain(numBackups, numSpans, baseFiles, r) b.ResetTimer() - b.Run("simple", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if err := checkCoverage(ctx, backups[numBackups-1].Spans, backups); err != nil { - b.Fatal(err) - } - cov := makeSimpleImportSpans(backups[numBackups-1].Spans, backups, nil, nil) - b.ReportMetric(float64(len(cov)), "coverSize") - } - }) - 
b.Run("coveringMerge", func(b *testing.B) { - for i := 0; i < b.N; i++ { - cov, _, err := makeImportSpans(backups[numBackups-1].Spans, backups, nil, nil, errOnMissingRange) - if err != nil { - b.Fatal(err) - } - b.ReportMetric(float64(len(cov)), "coverSize") + for i := 0; i < b.N; i++ { + if err := checkCoverage(ctx, backups[numBackups-1].Spans, backups); err != nil { + b.Fatal(err) } - }) + cov := makeSimpleImportSpans(backups[numBackups-1].Spans, backups, nil, nil) + b.ReportMetric(float64(len(cov)), "coverSize") + } }) } }) diff --git a/pkg/ccl/backupccl/create_scheduled_backup.go b/pkg/ccl/backupccl/create_scheduled_backup.go index 14a1a2f9f5b8..f2e409c132ed 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup.go +++ b/pkg/ccl/backupccl/create_scheduled_backup.go @@ -39,7 +39,7 @@ import ( "github.com/cockroachdb/errors" "github.com/gogo/protobuf/jsonpb" pbtypes "github.com/gogo/protobuf/types" - "github.com/gorhill/cronexpr" + "github.com/robfig/cron/v3" ) const ( @@ -195,19 +195,19 @@ func computeScheduleRecurrence( if evalFn == nil { return neverRecurs, nil } - cron, err := evalFn() + cronStr, err := evalFn() if err != nil { return nil, err } - expr, err := cronexpr.Parse(cron) + expr, err := cron.ParseStandard(cronStr) if err != nil { return nil, errors.Newf( `error parsing schedule expression: %q; it must be a valid cron expression`, - cron) + cronStr) } nextRun := expr.Next(now) frequency := expr.Next(nextRun).Sub(nextRun) - return &scheduleRecurrence{cron, frequency}, nil + return &scheduleRecurrence{cronStr, frequency}, nil } var forceFullBackup *scheduleRecurrence diff --git a/pkg/ccl/backupccl/create_scheduled_backup_test.go b/pkg/ccl/backupccl/create_scheduled_backup_test.go index d71727b75759..f3a0f79c76df 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup_test.go +++ b/pkg/ccl/backupccl/create_scheduled_backup_test.go @@ -40,7 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" pbtypes "github.com/gogo/protobuf/types" - "github.com/gorhill/cronexpr" + "github.com/robfig/cron/v3" "github.com/stretchr/testify/require" ) @@ -1078,8 +1078,10 @@ INSERT INTO t values (1), (10), (100); // to the next scheduled recurrence. for _, id := range []int64{fullID, incID} { s := th.loadSchedule(t, id) + e, err := cron.ParseStandard(s.ScheduleExpr()) + require.NoError(t, err) require.EqualValues(t, - cronexpr.MustParse(s.ScheduleExpr()).Next(th.env.Now()).Round(time.Microsecond), + e.Next(th.env.Now()).Round(time.Microsecond), s.NextRun()) } diff --git a/pkg/ccl/backupccl/helpers_test.go b/pkg/ccl/backupccl/helpers_test.go index 6b28087e6a29..cdbdd460889c 100644 --- a/pkg/ccl/backupccl/helpers_test.go +++ b/pkg/ccl/backupccl/helpers_test.go @@ -347,11 +347,11 @@ func makeThresholdBlocker(threshold int) thresholdBlocker { // getSpansFromManifest returns the spans that describe the data included in a // given backup. 
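The `create_scheduled_backup.go` hunk above swaps `gorhill/cronexpr` for `robfig/cron/v3` and derives the schedule's frequency from two consecutive `Next` calls. A minimal, standalone sketch of that derivation, assuming only the `cron.ParseStandard` / `Schedule.Next` API used in the hunk (the expression and variable names here are illustrative, not from the patch):

```
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// ParseStandard accepts the 5-field crontab format, as
	// computeScheduleRecurrence now expects.
	expr, err := cron.ParseStandard("0 2 * * *") // hypothetical: every day at 02:00
	if err != nil {
		panic(err)
	}
	now := time.Now()
	nextRun := expr.Next(now)
	// Frequency is approximated as the gap between two consecutive runs,
	// mirroring the computation in computeScheduleRecurrence.
	frequency := expr.Next(nextRun).Sub(nextRun)
	fmt.Println(nextRun, frequency)
}
```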
-func getSpansFromManifest(t *testing.T, backupPath string) roachpb.Spans { +func getSpansFromManifest(ctx context.Context, t *testing.T, backupPath string) roachpb.Spans { backupManifestBytes, err := ioutil.ReadFile(backupPath + "/" + backupManifestName) require.NoError(t, err) var backupManifest BackupManifest - decompressedBytes, err := decompressData(backupManifestBytes) + decompressedBytes, err := decompressData(ctx, nil, backupManifestBytes) require.NoError(t, err) require.NoError(t, protoutil.Unmarshal(decompressedBytes, &backupManifest)) spans := make([]roachpb.Span, 0, len(backupManifest.Files)) diff --git a/pkg/ccl/backupccl/import_spans_test.go b/pkg/ccl/backupccl/import_spans_test.go deleted file mode 100644 index f14aaca3566a..000000000000 --- a/pkg/ccl/backupccl/import_spans_test.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2021 The Cockroach Authors. -// -// Licensed as a CockroachDB Enterprise file under the Cockroach Community -// License (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt - -package backupccl - -import ( - "fmt" - "testing" - - "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/stretchr/testify/require" -) - -func makeTimestamps(n int) []hlc.Timestamp { - timestamps := make([]hlc.Timestamp, n) - for i := range timestamps { - timestamps[i] = hlc.Timestamp{WallTime: int64(i * 10)} - } - - return timestamps -} - -// MakeImportSpans looks for the following properties: -// - Start time -// - End time -// - Spans -// - Introduced spans -// - Files -func makeBackupManifest( - startTime, endTime hlc.Timestamp, spans, introducedSpans []roachpb.Span, -) BackupManifest { - // We only care about the files' span. - files := make([]BackupManifest_File, 0) - for i, span := range append(spans, introducedSpans...) { - files = append(files, BackupManifest_File{Span: span, Path: fmt.Sprintf("data/%d.sst", i)}) - } - - return BackupManifest{ - StartTime: startTime, - EndTime: endTime, - Spans: spans, - IntroducedSpans: introducedSpans, - Files: files, - } -} - -func makeTableSpan(tableID uint32) roachpb.Span { - k := keys.SystemSQLCodec.TablePrefix(tableID) - return roachpb.Span{Key: k, EndKey: k.PrefixEnd()} -} - -func TestMakeImportSpans(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - ts := makeTimestamps(10) - - var backupLocalityMap map[int]storeByLocalityKV - lowWaterMark := roachpb.KeyMin - - noIntroducedSpans := make([]roachpb.Span, 0) - onMissing := errOnMissingRange - - tcs := []struct { - name string - tablesToRestore []roachpb.Span - backups []BackupManifest - - // In the successful cases, expectedSpans and endTime should be - // specified. - expectedSpans []roachpb.Span - expectedMaxEndTime hlc.Timestamp - - // In the error case, only the error is checked. 
- expectedError string - }{ - { - name: "single-backup", - tablesToRestore: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - expectedMaxEndTime: ts[1], - }, - { - name: "incremental-backup", - tablesToRestore: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - // Now add an incremental backup of the same tables. - makeBackupManifest( - ts[1], ts[2], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - expectedMaxEndTime: ts[2], - }, - { - name: "restore-subset", - // Restore only a sub-set of the spans that have been backed up. - tablesToRestore: []roachpb.Span{makeTableSpan(52)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[1], ts[2], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52)}, - expectedMaxEndTime: ts[2], - }, - { - // Try backing up a non-new table in an incremental backup. - name: "widen-backup", - tablesToRestore: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - backups: []BackupManifest{ - // The full backup only has table 52. - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52)}, - noIntroducedSpans, - ), - // This incremental claims to have backed up more. - makeBackupManifest( - ts[1], ts[2], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedError: "no backup covers time [0,0,0.000000010,0) for range [/Table/53,/Table/54) or backups listed out of order (mismatched start time)", - }, - { - name: "narrow-backup", - tablesToRestore: []roachpb.Span{makeTableSpan(52)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - // This full backup backs up both tables 52 and 53. - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[1], ts[2], - // This incremental decided to only backup table 52. That's ok. - []roachpb.Span{makeTableSpan(52)}, - noIntroducedSpans, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52)}, - expectedMaxEndTime: ts[2], - }, - { - name: "narrow-backup-rewident", - tablesToRestore: []roachpb.Span{makeTableSpan(52)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - // This full backup backs up both tables 52 and 53. - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[1], ts[2], - // This incremental decided to only backup table 52. That's - // permitted. - []roachpb.Span{makeTableSpan(52)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[2], ts[3], - // We can't start backing up table 53 again after an - // incremental missed it though. 
- []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedError: "no backup covers time [0.000000010,0,0.000000020,0) for range [/Table/53,/Table/54) or backups listed out of order (mismatched start time)", - }, - { - name: "incremental-newly-created-table", - tablesToRestore: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[1], ts[2], - // We're now backing up a new table (53), but this is only - // allowed since this table didn't exist at the time of the - // full backup. It must appear in introduced spans. - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - // Table 53 was created between the full backup and this - // inc, so it appears as introduced spans. - []roachpb.Span{makeTableSpan(53)}, // introduced spans - ), - makeBackupManifest( - ts[2], ts[3], - // We should be able to backup table 53 incremenatally after - // it has been introduced. - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - expectedMaxEndTime: ts[3], - }, - { - name: "reintroduced-spans", - tablesToRestore: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - backups: []BackupManifest{ - makeBackupManifest( - ts[0], ts[1], - []roachpb.Span{makeTableSpan(52)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[1], ts[2], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - // Table 53 was created between the full backup and this - // inc, so it appears as introduced spans. - []roachpb.Span{makeTableSpan(53)}, // introduced spans - ), - makeBackupManifest( - ts[2], ts[3], - // We should be able to backup table 53 incremenatally after - // it has been introduced. - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - noIntroducedSpans, - ), - makeBackupManifest( - ts[3], ts[4], - []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - // In some cases, spans that were normally included in - // incremental backups may be re-introduced. - []roachpb.Span{makeTableSpan(53)}, - ), - }, - - expectedSpans: []roachpb.Span{makeTableSpan(52), makeTableSpan(53)}, - expectedMaxEndTime: ts[4], - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - importSpans, maxEndTime, err := makeImportSpans( - tc.tablesToRestore, tc.backups, backupLocalityMap, - lowWaterMark, onMissing) - - // Collect just the spans to import. 
- spansToImport := make([]roachpb.Span, len(importSpans)) - for i, importSpan := range importSpans { - spansToImport[i] = importSpan.Span - } - - if len(tc.expectedError) != 0 { - require.Equal(t, tc.expectedError, err.Error()) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedSpans, spansToImport) - require.Equal(t, tc.expectedMaxEndTime, maxEndTime) - } - }) - } -} diff --git a/pkg/ccl/backupccl/key_rewriter_test.go b/pkg/ccl/backupccl/key_rewriter_test.go index 379deddf81fe..0a041c6d92bf 100644 --- a/pkg/ccl/backupccl/key_rewriter_test.go +++ b/pkg/ccl/backupccl/key_rewriter_test.go @@ -81,7 +81,7 @@ func TestKeyRewriter(t *testing.T) { t.Run("normal", func(t *testing.T) { key := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, - systemschema.NamespaceTable, desc.GetPrimaryIndexID()) + systemschema.NamespaceTable.GetID(), desc.GetPrimaryIndexID()) newKey, ok, err := kr.RewriteKey(key) if err != nil { t.Fatal(err) @@ -100,7 +100,7 @@ func TestKeyRewriter(t *testing.T) { t.Run("prefix end", func(t *testing.T) { key := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, - systemschema.NamespaceTable, desc.GetPrimaryIndexID())).PrefixEnd() + systemschema.NamespaceTable.GetID(), desc.GetPrimaryIndexID())).PrefixEnd() newKey, ok, err := kr.RewriteKey(key) if err != nil { t.Fatal(err) @@ -129,7 +129,7 @@ func TestKeyRewriter(t *testing.T) { t.Fatal(err) } - key := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, systemschema.NamespaceTable, desc.GetPrimaryIndexID()) + key := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, systemschema.NamespaceTable.GetID(), desc.GetPrimaryIndexID()) newKey, ok, err := newKr.RewriteKey(key) if err != nil { t.Fatal(err) @@ -156,7 +156,7 @@ func TestKeyRewriter(t *testing.T) { }) require.NoError(t, err) - key := rowenc.MakeIndexKeyPrefix(srcCodec, systemschema.NamespaceTable, desc.GetPrimaryIndexID()) + key := rowenc.MakeIndexKeyPrefix(srcCodec, systemschema.NamespaceTable.GetID(), desc.GetPrimaryIndexID()) newKey, ok, err := newKr.RewriteKey(key) require.NoError(t, err) if !ok { diff --git a/pkg/ccl/backupccl/manifest_handling.go b/pkg/ccl/backupccl/manifest_handling.go index 6c859794a047..36598696f253 100644 --- a/pkg/ccl/backupccl/manifest_handling.go +++ b/pkg/ccl/backupccl/manifest_handling.go @@ -36,6 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" @@ -122,41 +123,44 @@ func (m *BackupManifest) isIncremental() bool { // export storage. func ReadBackupManifestFromURI( ctx context.Context, + mem *mon.BoundAccount, uri string, user security.SQLUsername, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory, encryption *jobspb.BackupEncryptionOptions, -) (BackupManifest, error) { +) (BackupManifest, int64, error) { exportStore, err := makeExternalStorageFromURI(ctx, uri, user) if err != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } defer exportStore.Close() - return ReadBackupManifestFromStore(ctx, exportStore, encryption) + return ReadBackupManifestFromStore(ctx, mem, exportStore, encryption) } -// ReadBackupManifestFromStore reads and unmarshalls a BackupManifest -// from an export store. 
+// ReadBackupManifestFromStore reads and unmarshalls a BackupManifest from the +// store and returns it with the size it reserved for it from the boundAccount. func ReadBackupManifestFromStore( ctx context.Context, + mem *mon.BoundAccount, exportStore cloud.ExternalStorage, encryption *jobspb.BackupEncryptionOptions, -) (BackupManifest, error) { - backupManifest, err := readBackupManifest(ctx, exportStore, backupManifestName, +) (BackupManifest, int64, error) { + backupManifest, memSize, err := readBackupManifest(ctx, mem, exportStore, backupManifestName, encryption) if err != nil { - oldManifest, newErr := readBackupManifest(ctx, exportStore, backupOldManifestName, + oldManifest, newMemSize, newErr := readBackupManifest(ctx, mem, exportStore, backupOldManifestName, encryption) if newErr != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } backupManifest = oldManifest + memSize = newMemSize } backupManifest.Dir = exportStore.Conf() // TODO(dan): Sanity check this BackupManifest: non-empty EndTime, // non-empty Paths, and non-overlapping Spans and keyranges in Files. - return backupManifest, nil + return backupManifest, memSize, nil } func containsManifest(ctx context.Context, exportStore cloud.ExternalStorage) (bool, error) { @@ -187,32 +191,40 @@ func compressData(descBuf []byte) ([]byte, error) { // decompressData decompresses gzip data buffer and // returns decompressed bytes. -func decompressData(descBytes []byte) ([]byte, error) { +func decompressData(ctx context.Context, mem *mon.BoundAccount, descBytes []byte) ([]byte, error) { r, err := gzip.NewReader(bytes.NewBuffer(descBytes)) if err != nil { return nil, err } defer r.Close() - return ioutil.ReadAll(r) + return mon.ReadAll(ctx, r, mem) } // readBackupManifest reads and unmarshals a BackupManifest from filename in -// the provided export store. +// the provided export store. If the passed bound account is not nil, the bytes +// read are reserved from it as it is read and then the approximate in-memory +// size (the total decompressed serialized byte size) is reserved as well before +// deserialization and returned so that callers can then shrink the bound acct +// by that amount when they release the returned manifest. 
func readBackupManifest( ctx context.Context, + mem *mon.BoundAccount, exportStore cloud.ExternalStorage, filename string, encryption *jobspb.BackupEncryptionOptions, -) (BackupManifest, error) { +) (BackupManifest, int64, error) { r, err := exportStore.ReadFile(ctx, filename) if err != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } defer r.Close() - descBytes, err := ioutil.ReadAll(r) + descBytes, err := mon.ReadAll(ctx, r, mem) if err != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } + defer func() { + mem.Shrink(ctx, int64(cap(descBytes))) + }() checksumFile, err := exportStore.ReadFile(ctx, filename+backupManifestChecksumSuffix) if err == nil { @@ -220,20 +232,20 @@ func readBackupManifest( defer checksumFile.Close() checksumFileData, err := ioutil.ReadAll(checksumFile) if err != nil { - return BackupManifest{}, errors.Wrap(err, "reading checksum file") + return BackupManifest{}, 0, errors.Wrap(err, "reading checksum file") } checksum, err := getChecksum(descBytes) if err != nil { - return BackupManifest{}, errors.Wrap(err, "calculating checksum of manifest") + return BackupManifest{}, 0, errors.Wrap(err, "calculating checksum of manifest") } if !bytes.Equal(checksumFileData, checksum) { - return BackupManifest{}, errors.Newf("checksum mismatch; expected %s, got %s", + return BackupManifest{}, 0, errors.Newf("checksum mismatch; expected %s, got %s", hex.EncodeToString(checksumFileData), hex.EncodeToString(checksum)) } } else { // If we don't have a checksum file, carry on. This might be an old version. if !errors.Is(err, cloud.ErrFileDoesNotExist) { - return BackupManifest{}, err + return BackupManifest{}, 0, err } } @@ -242,30 +254,41 @@ func readBackupManifest( encryptionKey, err = getEncryptionKey(ctx, encryption, exportStore.Settings(), exportStore.ExternalIOConf()) if err != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } descBytes, err = storageccl.DecryptFile(descBytes, encryptionKey) if err != nil { - return BackupManifest{}, err + return BackupManifest{}, 0, err } } if isGZipped(descBytes) { - descBytes, err = decompressData(descBytes) + decompressedBytes, err := decompressData(ctx, mem, descBytes) if err != nil { - return BackupManifest{}, errors.Wrap( + return BackupManifest{}, 0, errors.Wrap( err, "decompressing backup manifest") } + // Release the compressed bytes from the monitor before we switch descBytes + // to point at the decompressed bytes, since the deferred release will later + // release the latter. + mem.Shrink(ctx, int64(cap(descBytes))) + descBytes = decompressedBytes + } + + approxMemSize := int64(len(descBytes)) + if err := mem.Grow(ctx, approxMemSize); err != nil { + return BackupManifest{}, 0, err } var backupManifest BackupManifest if err := protoutil.Unmarshal(descBytes, &backupManifest); err != nil { + mem.Shrink(ctx, approxMemSize) if encryption == nil && storageccl.AppearsEncrypted(descBytes) { - return BackupManifest{}, errors.Wrapf( + return BackupManifest{}, 0, errors.Wrapf( err, "file appears encrypted -- try specifying one of \"%s\" or \"%s\"", backupOptEncPassphrase, backupOptEncKMS) } - return BackupManifest{}, err + return BackupManifest{}, 0, err } for _, d := range backupManifest.Descriptors { // Calls to GetTable are generally frowned upon. 
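The `readBackupManifest` changes above follow a reserve-then-hand-off discipline: bytes are reserved against the bound account while they are read and decompressed, intermediate reservations are shrunk as buffers are swapped out, and the decompressed size is grown and returned so the caller can shrink by exactly that amount when it releases the manifest. A minimal sketch of that contract, assuming the `mon.BoundAccount` `Grow`/`Shrink` API shown in the hunks; `readAccounted` is a hypothetical helper, not part of the patch:

```
package backupccl

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/cockroachdb/cockroach/pkg/util/mon"
)

// readAccounted illustrates the contract: the returned size has already been
// grown against mem, and the caller must shrink mem by that amount when it
// releases whatever was built from the returned bytes.
func readAccounted(ctx context.Context, mem *mon.BoundAccount, r io.Reader) ([]byte, int64, error) {
	// The real code uses mon.ReadAll to reserve incrementally while reading;
	// a plain ReadAll keeps this sketch short.
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, 0, err
	}
	sz := int64(len(buf))
	if err := mem.Grow(ctx, sz); err != nil {
		return nil, 0, err // nothing stays reserved on failure
	}
	return buf, sz, nil
}
```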
@@ -286,48 +309,65 @@ func readBackupManifest( t.ModificationTime = hlc.Timestamp{WallTime: 1} } } - return backupManifest, nil + + return backupManifest, approxMemSize, nil } func readBackupPartitionDescriptor( ctx context.Context, + mem *mon.BoundAccount, exportStore cloud.ExternalStorage, filename string, encryption *jobspb.BackupEncryptionOptions, -) (BackupPartitionDescriptor, error) { +) (BackupPartitionDescriptor, int64, error) { r, err := exportStore.ReadFile(ctx, filename) if err != nil { - return BackupPartitionDescriptor{}, err + return BackupPartitionDescriptor{}, 0, err } defer r.Close() - descBytes, err := ioutil.ReadAll(r) + descBytes, err := mon.ReadAll(ctx, r, mem) if err != nil { - return BackupPartitionDescriptor{}, err + return BackupPartitionDescriptor{}, 0, err } + defer func() { + mem.Shrink(ctx, int64(cap(descBytes))) + }() + if encryption != nil { encryptionKey, err := getEncryptionKey(ctx, encryption, exportStore.Settings(), exportStore.ExternalIOConf()) if err != nil { - return BackupPartitionDescriptor{}, err + return BackupPartitionDescriptor{}, 0, err } descBytes, err = storageccl.DecryptFile(descBytes, encryptionKey) if err != nil { - return BackupPartitionDescriptor{}, err + return BackupPartitionDescriptor{}, 0, err } } if isGZipped(descBytes) { - descBytes, err = decompressData(descBytes) + decompressedData, err := decompressData(ctx, mem, descBytes) if err != nil { - return BackupPartitionDescriptor{}, errors.Wrap( + return BackupPartitionDescriptor{}, 0, errors.Wrap( err, "decompressing backup partition descriptor") } + mem.Shrink(ctx, int64(cap(descBytes))) + descBytes = decompressedData } + + memSize := int64(len(descBytes)) + + if err := mem.Grow(ctx, memSize); err != nil { + return BackupPartitionDescriptor{}, 0, err + } + var backupManifest BackupPartitionDescriptor if err := protoutil.Unmarshal(descBytes, &backupManifest); err != nil { - return BackupPartitionDescriptor{}, err + mem.Shrink(ctx, memSize) + return BackupPartitionDescriptor{}, 0, err } - return backupManifest, err + + return backupManifest, memSize, err } // readTableStatistics reads and unmarshals a StatsTable from filename in @@ -525,25 +565,35 @@ func writeTableStatistics( func loadBackupManifests( ctx context.Context, + mem *mon.BoundAccount, uris []string, user security.SQLUsername, makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory, encryption *jobspb.BackupEncryptionOptions, -) ([]BackupManifest, error) { +) ([]BackupManifest, int64, error) { backupManifests := make([]BackupManifest, len(uris)) - + var reserved int64 + defer func() { + if reserved != 0 { + mem.Shrink(ctx, reserved) + } + }() for i, uri := range uris { - desc, err := ReadBackupManifestFromURI(ctx, uri, user, makeExternalStorageFromURI, + desc, memSize, err := ReadBackupManifestFromURI(ctx, mem, uri, user, makeExternalStorageFromURI, encryption) if err != nil { - return nil, errors.Wrapf(err, "failed to read backup descriptor") + return nil, 0, errors.Wrapf(err, "failed to read backup descriptor") } + reserved += memSize backupManifests[i] = desc } if len(backupManifests) == 0 { - return nil, errors.Newf("no backups found") + return nil, 0, errors.Newf("no backups found") } - return backupManifests, nil + memSize := reserved + reserved = 0 + + return backupManifests, memSize, nil } // getLocalityInfo takes a list of stores and their URIs, along with the main @@ -567,7 +617,7 @@ func getLocalityInfo( } found := false for i, store := range stores { - if desc, err := readBackupPartitionDescriptor(ctx, 
store, filename, encryption); err == nil { + if desc, _, err := readBackupPartitionDescriptor(ctx, nil /*mem*/, store, filename, encryption); err == nil { if desc.BackupID != mainBackupManifest.ID { return info, errors.Errorf( "expected backup part to have backup ID %s, found %s", @@ -667,6 +717,7 @@ func checkForLatestFileInCollection( // layers had been specified in `from` explicitly. func resolveBackupManifests( ctx context.Context, + mem *mon.BoundAccount, baseStores []cloud.ExternalStorage, mkStore cloud.ExternalStorageFromURIFactory, from [][]string, @@ -679,12 +730,21 @@ func resolveBackupManifests( // mainBackupManifests contains the manifest located at each defaultURI in the backup chain. mainBackupManifests []BackupManifest, localityInfo []jobspb.RestoreDetails_BackupLocalityInfo, + reservedMemSize int64, _ error, ) { - baseManifest, err := ReadBackupManifestFromStore(ctx, baseStores[0], encryption) + var ownedMemSize int64 + defer func() { + if ownedMemSize != 0 { + mem.Shrink(ctx, ownedMemSize) + } + }() + + baseManifest, memSize, err := ReadBackupManifestFromStore(ctx, mem, baseStores[0], encryption) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } + ownedMemSize += memSize // If explicit incremental backups were are passed, we simply load them one // by one as specified and return the results. @@ -701,21 +761,23 @@ func resolveBackupManifests( for j := range uris { stores[j], err = mkStore(ctx, uris[j], user) if err != nil { - return nil, nil, nil, errors.Wrapf(err, "export configuration") + return nil, nil, nil, 0, errors.Wrapf(err, "export configuration") } defer stores[j].Close() } - mainBackupManifests[i], err = ReadBackupManifestFromStore(ctx, stores[0], encryption) + mainBackupManifests[i], memSize, err = ReadBackupManifestFromStore(ctx, mem, stores[0], encryption) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } + ownedMemSize += memSize + if len(uris) > 1 { localityInfo[i], err = getLocalityInfo( ctx, stores, uris, mainBackupManifests[i], encryption, "", /* prefix */ ) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } } } @@ -729,7 +791,7 @@ func resolveBackupManifests( for i := range incFrom { store, err := mkStore(ctx, incFrom[i], user) if err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to open backup storage location") + return nil, nil, nil, 0, errors.Wrapf(err, "failed to open backup storage location") } defer store.Close() incStores[i] = store @@ -747,7 +809,7 @@ func resolveBackupManifests( // and restore the specified base. prev = nil } else { - return nil, nil, nil, err + return nil, nil, nil, 0, err } } @@ -764,7 +826,7 @@ func resolveBackupManifests( ctx, baseStores, from[0], baseManifest, encryption, "", /* prefix */ ) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } // If we discovered additional layers, handle them too. @@ -776,17 +838,18 @@ func resolveBackupManifests( for i := range incFrom { baseURIs[i], err = url.Parse(incFrom[i]) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } } // For each layer, we need to load the default manifest then calculate the URI and the // locality info for each partition. 
for i := range prev { - defaultManifestForLayer, err := readBackupManifest(ctx, incStores[0], prev[i], encryption) + defaultManifestForLayer, memSize, err := readBackupManifest(ctx, mem, incStores[0], prev[i], encryption) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } + ownedMemSize += memSize mainBackupManifests[i+1] = defaultManifestForLayer // prev[i] is the path to the manifest file itself for layer i -- the @@ -803,7 +866,7 @@ func resolveBackupManifests( defaultURIs[i+1] = partitionURIs[0] localityInfo[i+1], err = getLocalityInfo(ctx, incStores, partitionURIs, defaultManifestForLayer, encryption, incSubDir) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } } } @@ -828,12 +891,12 @@ func resolveBackupManifests( if b.MVCCFilter != MVCCFilter_All { const errPrefix = "invalid RESTORE timestamp: restoring to arbitrary time requires that BACKUP for requested time be created with '%s' option." if i == 0 { - return nil, nil, nil, errors.Errorf( + return nil, nil, nil, 0, errors.Errorf( errPrefix+" nearest backup time is %s", backupOptRevisionHistory, timeutil.Unix(0, b.EndTime.WallTime).UTC(), ) } - return nil, nil, nil, errors.Errorf( + return nil, nil, nil, 0, errors.Errorf( errPrefix+" nearest BACKUP times are %s or %s", backupOptRevisionHistory, timeutil.Unix(0, mainBackupManifests[i-1].EndTime.WallTime).UTC(), @@ -846,7 +909,7 @@ func resolveBackupManifests( // only captured since the GC window. Note that the RevisionStartTime is // the latest for ranges backed up. if endTime.LessEq(b.RevisionStartTime) { - return nil, nil, nil, errors.Errorf( + return nil, nil, nil, 0, errors.Errorf( "invalid RESTORE timestamp: BACKUP for requested time only has revision history"+ " from %v", timeutil.Unix(0, b.RevisionStartTime.WallTime).UTC(), ) @@ -857,13 +920,16 @@ func resolveBackupManifests( } if !ok { - return nil, nil, nil, errors.Errorf( + return nil, nil, nil, 0, errors.Errorf( "invalid RESTORE timestamp: supplied backups do not cover requested time", ) } } - return defaultURIs, mainBackupManifests, localityInfo, nil + totalMemSize := ownedMemSize + ownedMemSize = 0 + + return defaultURIs, mainBackupManifests, localityInfo, totalMemSize, nil } // TODO(anzoteh96): benchmark the performance of different search algorithms, diff --git a/pkg/ccl/backupccl/restore_data_processor.go b/pkg/ccl/backupccl/restore_data_processor.go index f61a2e7b3dca..e38289b18b15 100644 --- a/pkg/ccl/backupccl/restore_data_processor.go +++ b/pkg/ccl/backupccl/restore_data_processor.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" "github.com/cockroachdb/cockroach/pkg/cloud" + "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/bulk" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -102,6 +103,13 @@ var numRestoreWorkers = settings.RegisterIntSetting( settings.PositiveInt, ) +var restoreAtNow = settings.RegisterBoolSetting( + settings.TenantWritable, + "bulkio.restore_at_current_time.enabled", + "write restored data at the current timestamp", + false, +) + func newRestoreDataProcessor( flowCtx *execinfra.FlowCtx, processorID int32, @@ -164,7 +172,7 @@ func (rd *restoreDataProcessor) Start(ctx context.Context) { rd.phaseGroup.GoCtx(func(ctx context.Context) error { defer close(rd.sstCh) for entry := range entries { - if err := rd.openSSTs(entry, rd.sstCh); err != nil { + if err := rd.openSSTs(ctx, entry, rd.sstCh); err != nil { return err } 
} @@ -174,7 +182,7 @@ func (rd *restoreDataProcessor) Start(ctx context.Context) { rd.phaseGroup.GoCtx(func(ctx context.Context) error { defer close(rd.progCh) - return rd.runRestoreWorkers(rd.sstCh) + return rd.runRestoreWorkers(ctx, rd.sstCh) }) } @@ -191,7 +199,7 @@ func inputReader( entries chan execinfrapb.RestoreSpanEntry, metaCh chan *execinfrapb.ProducerMetadata, ) error { - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc for { // We read rows from the SplitAndScatter processor. We expect each row to @@ -248,9 +256,8 @@ type mergedSST struct { } func (rd *restoreDataProcessor) openSSTs( - entry execinfrapb.RestoreSpanEntry, sstCh chan mergedSST, + ctx context.Context, entry execinfrapb.RestoreSpanEntry, sstCh chan mergedSST, ) error { - ctx := rd.Ctx ctxDone := ctx.Done() // The sstables only contain MVCC data and no intents, so using an MVCC @@ -307,7 +314,7 @@ func (rd *restoreDataProcessor) openSSTs( return nil } - log.VEventf(rd.Ctx, 1 /* level */, "ingesting span [%s-%s)", entry.Span.Key, entry.Span.EndKey) + log.VEventf(ctx, 1 /* level */, "ingesting span [%s-%s)", entry.Span.Key, entry.Span.EndKey) for _, file := range entry.Files { log.VEventf(ctx, 2, "import file %s which starts at %s", file.Path, entry.Span.Key) @@ -330,8 +337,8 @@ func (rd *restoreDataProcessor) openSSTs( return sendIters(iters, dirs) } -func (rd *restoreDataProcessor) runRestoreWorkers(ssts chan mergedSST) error { - return ctxgroup.GroupWorkers(rd.Ctx, rd.numWorkers, func(ctx context.Context, _ int) error { +func (rd *restoreDataProcessor) runRestoreWorkers(ctx context.Context, ssts chan mergedSST) error { + return ctxgroup.GroupWorkers(ctx, rd.numWorkers, func(ctx context.Context, _ int) error { for { done, err := func() (done bool, _ error) { sstIter, ok := <-ssts @@ -340,7 +347,7 @@ func (rd *restoreDataProcessor) runRestoreWorkers(ssts chan mergedSST) error { return done, nil } - summary, err := rd.processRestoreSpanEntry(sstIter) + summary, err := rd.processRestoreSpanEntry(ctx, sstIter) if err != nil { return done, err } @@ -366,10 +373,9 @@ func (rd *restoreDataProcessor) runRestoreWorkers(ssts chan mergedSST) error { } func (rd *restoreDataProcessor) processRestoreSpanEntry( - sst mergedSST, + ctx context.Context, sst mergedSST, ) (roachpb.BulkOpSummary, error) { db := rd.flowCtx.Cfg.DB - ctx := rd.Ctx evalCtx := rd.EvalCtx var summary roachpb.BulkOpSummary @@ -377,6 +383,13 @@ func (rd *restoreDataProcessor) processRestoreSpanEntry( iter := sst.iter defer sst.cleanup() + writeAtBatchTS := restoreAtNow.Get(&evalCtx.Settings.SV) + if writeAtBatchTS && !evalCtx.Settings.Version.IsActive(ctx, clusterversion.MVCCAddSSTable) { + return roachpb.BulkOpSummary{}, errors.Newf( + "cannot use %s until version %s", restoreAtNow.Key(), clusterversion.MVCCAddSSTable.String(), + ) + } + // "disallowing" shadowing of anything older than logical=1 is i.e. allow all // shadowing. We must allow shadowing in case the RESTORE has to retry any // ingestions, but setting a (permissive) disallow like this serves to force @@ -385,8 +398,13 @@ func (rd *restoreDataProcessor) processRestoreSpanEntry( // this comes at the cost of said overlap check, but in the common case of // non-overlapping ingestion into empty spans, that is just one seek. 
disallowShadowingBelow := hlc.Timestamp{Logical: 1} - batcher, err := bulk.MakeSSTBatcher(ctx, db, evalCtx.Settings, - func() int64 { return rd.flushBytes }, disallowShadowingBelow) + batcher, err := bulk.MakeSSTBatcher(ctx, + db, + evalCtx.Settings, + func() int64 { return rd.flushBytes }, + disallowShadowingBelow, + writeAtBatchTS, + ) if err != nil { return summary, err } diff --git a/pkg/ccl/backupccl/restore_data_processor_test.go b/pkg/ccl/backupccl/restore_data_processor_test.go index fbbef31b3a39..ba902825a066 100644 --- a/pkg/ccl/backupccl/restore_data_processor_test.go +++ b/pkg/ccl/backupccl/restore_data_processor_test.go @@ -389,10 +389,10 @@ func runTestIngest(t *testing.T, init func(*cluster.Settings)) { mockRestoreDataSpec) require.NoError(t, err) ssts := make(chan mergedSST, 1) - require.NoError(t, mockRestoreDataProcessor.openSSTs(restoreSpanEntry, ssts)) + require.NoError(t, mockRestoreDataProcessor.openSSTs(ctx, restoreSpanEntry, ssts)) close(ssts) sst := <-ssts - _, err = mockRestoreDataProcessor.processRestoreSpanEntry(sst) + _, err = mockRestoreDataProcessor.processRestoreSpanEntry(ctx, sst) require.NoError(t, err) clientKVs, err := kvDB.Scan(ctx, reqStartKey, reqEndKey, 0) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 50df34ba880c..97672f3821b4 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -54,6 +54,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/interval" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" + "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -415,25 +416,16 @@ func restore( return emptyRowCount, errors.Wrap(err, "resolving locality locations") } + if err := checkCoverage(restoreCtx, dataToRestore.getSpans(), backupManifests); err != nil { + return emptyRowCount, err + } + // Pivot the backups, which are grouped by time, into requests for import, // which are grouped by keyrange. highWaterMark := job.Progress().Details.(*jobspb.Progress_Restore).Restore.HighWater - const useSimpleImportSpans = true - var importSpans []execinfrapb.RestoreSpanEntry - if useSimpleImportSpans { - if err := checkCoverage(restoreCtx, dataToRestore.getSpans(), backupManifests); err != nil { - return emptyRowCount, err - } - importSpans = makeSimpleImportSpans(dataToRestore.getSpans(), backupManifests, backupLocalityMap, - highWaterMark) - } else { - importSpans, _, err = makeImportSpans(dataToRestore.getSpans(), backupManifests, backupLocalityMap, - highWaterMark, errOnMissingRange) - if err != nil { - return emptyRowCount, errors.Wrapf(err, "making import requests for %d backups", len(backupManifests)) - } - } + importSpans := makeSimpleImportSpans(dataToRestore.getSpans(), backupManifests, backupLocalityMap, + highWaterMark) if len(importSpans) == 0 { // There are no files to restore. @@ -570,14 +562,15 @@ func restore( // be broken down into two methods. 
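The `restore_data_processor.go` hunks above add the `bulkio.restore_at_current_time.enabled` setting and refuse to honor it until the `MVCCAddSSTable` cluster version is active. A sketch of that gate in isolation, assuming the setting and version-check calls used in the hunk; `checkWriteAtNow` is an illustrative name, not from the patch:

```
package backupccl

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/errors"
)

// checkWriteAtNow mirrors the gate added to processRestoreSpanEntry: the
// opt-in setting only takes effect once the whole cluster understands the
// request type that implements it.
func checkWriteAtNow(ctx context.Context, st *cluster.Settings) (bool, error) {
	writeAtBatchTS := restoreAtNow.Get(&st.SV)
	if writeAtBatchTS && !st.Version.IsActive(ctx, clusterversion.MVCCAddSSTable) {
		return false, errors.Newf(
			"cannot use %s until version %s",
			restoreAtNow.Key(), clusterversion.MVCCAddSSTable.String())
	}
	return writeAtBatchTS, nil
}
```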
func loadBackupSQLDescs( ctx context.Context, + mem *mon.BoundAccount, p sql.JobExecContext, details jobspb.RestoreDetails, encryption *jobspb.BackupEncryptionOptions, -) ([]BackupManifest, BackupManifest, []catalog.Descriptor, error) { - backupManifests, err := loadBackupManifests(ctx, details.URIs, +) ([]BackupManifest, BackupManifest, []catalog.Descriptor, int64, error) { + backupManifests, sz, err := loadBackupManifests(ctx, mem, details.URIs, p.User(), p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, encryption) if err != nil { - return nil, BackupManifest{}, nil, err + return nil, BackupManifest{}, nil, 0, err } allDescs, latestBackupManifest := loadSQLDescsFromBackupsAtTime(backupManifests, details.EndTime) @@ -603,9 +596,11 @@ func loadBackupSQLDescs( } if err := maybeUpgradeDescriptors(ctx, sqlDescs, true /* skipFKsWithNoMatchingTable */); err != nil { - return nil, BackupManifest{}, nil, err + mem.Shrink(ctx, sz) + return nil, BackupManifest{}, nil, 0, err } - return backupManifests, latestBackupManifest, sqlDescs, nil + + return backupManifests, latestBackupManifest, sqlDescs, sz, nil } // restoreResumer should only store a reference to the job it's running. State @@ -1399,12 +1394,18 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro p := execCtx.(sql.JobExecContext) r.execCfg = p.ExecCfg() - backupManifests, latestBackupManifest, sqlDescs, err := loadBackupSQLDescs( - ctx, p, details, details.Encryption, + mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount() + defer mem.Close(ctx) + + backupManifests, latestBackupManifest, sqlDescs, memSize, err := loadBackupSQLDescs( + ctx, &mem, p, details, details.Encryption, ) if err != nil { return err } + defer func() { + mem.Shrink(ctx, memSize) + }() // backupCodec is the codec that was used to encode the keys in the backup. It // is the tenant in which the backup was taken. backupCodec := keys.SystemSQLCodec diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index b122b1f54687..5837781e16f6 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -34,6 +34,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -44,7 +45,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" - "github.com/cockroachdb/cockroach/pkg/sql/covering" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -1361,13 +1361,6 @@ func RewriteTableDescs( return nil } -func errOnMissingRange(span covering.Range, start, end hlc.Timestamp) error { - return errors.Errorf( - "no backup covers time [%s,%s) for range [%s,%s) (or backups out of order)", - start, end, roachpb.Key(span.Start), roachpb.Key(span.End), - ) -} - // resolveOptionsForRestoreJobDescription creates a copy of // the options specified during a restore, after processing // them to be suitable for displaying in the jobs' description. 
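The new `*mon.BoundAccount` parameter and the `Shrink` calls above follow the usual grow-on-load, shrink-on-release accounting pattern for memory held across a call. A condensed, self-contained sketch of the pattern, independent of the manifest code:

```
package backupccl

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/mon"
)

// processWithAccounting reserves the size of raw against a bound account for
// as long as the data is held, and releases the reservation on the way out.
func processWithAccounting(ctx context.Context, monitor *mon.BytesMonitor, raw []byte) error {
	mem := monitor.MakeBoundAccount()
	defer mem.Close(ctx)

	sz := int64(len(raw))
	// Grow fails if the reservation would push the monitor over budget.
	if err := mem.Grow(ctx, sz); err != nil {
		return err
	}
	// Shrink once the bytes are no longer referenced; Close above would also
	// release anything still outstanding.
	defer mem.Shrink(ctx, sz)

	// ... use raw while the reservation is held ...
	return nil
}
```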
@@ -1729,7 +1722,7 @@ func checkPrivilegesForRestore( func checkClusterRegions( ctx context.Context, p sql.PlanHookState, typesByID map[descpb.ID]*typedesc.Mutable, ) error { - regionSet := make(map[descpb.RegionName]struct{}) + regionSet := make(map[catpb.RegionName]struct{}) for _, typ := range typesByID { typeDesc := typedesc.NewBuilder(typ.TypeDesc()).BuildImmutableType() if typeDesc.GetKind() == descpb.TypeDescriptor_MULTIREGION_ENUM { @@ -1831,13 +1824,19 @@ func doRestorePlan( KMSInfo: defaultKMSInfo} } - defaultURIs, mainBackupManifests, localityInfo, err := resolveBackupManifests( - ctx, baseStores, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, from, + mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount() + defer mem.Close(ctx) + + defaultURIs, mainBackupManifests, localityInfo, memReserved, err := resolveBackupManifests( + ctx, &mem, baseStores, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, from, incFrom, endTime, encryption, p.User(), ) if err != nil { return err } + defer func() { + mem.Shrink(ctx, memReserved) + }() currentVersion := p.ExecCfg().Settings.Version.ActiveVersion(ctx) for i := range mainBackupManifests { @@ -2222,7 +2221,7 @@ func planDatabaseModifiersForRestore( restoreDBs []catalog.DatabaseDescriptor, ) (map[descpb.ID]*jobspb.RestoreDetails_DatabaseModifier, []catalog.Descriptor, error) { databaseModifiers := make(map[descpb.ID]*jobspb.RestoreDetails_DatabaseModifier) - defaultPrimaryRegion := descpb.RegionName( + defaultPrimaryRegion := catpb.RegionName( sql.DefaultPrimaryRegion.Get(&p.ExecCfg().Settings.SV), ) if defaultPrimaryRegion == "" { @@ -2314,7 +2313,7 @@ func planDatabaseModifiersForRestore( return nil, nil, err } regionConfig := multiregion.MakeRegionConfig( - []descpb.RegionName{defaultPrimaryRegion}, + []catpb.RegionName{defaultPrimaryRegion}, defaultPrimaryRegion, sg, regionEnumID, diff --git a/pkg/ccl/backupccl/restore_span_covering.go b/pkg/ccl/backupccl/restore_span_covering.go index 6fe3693b64e7..e8f00ddfdf5e 100644 --- a/pkg/ccl/backupccl/restore_span_covering.go +++ b/pkg/ccl/backupccl/restore_span_covering.go @@ -11,20 +11,16 @@ package backupccl import ( "sort" - "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql/covering" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/interval" - "github.com/cockroachdb/errors" ) type intervalSpan roachpb.Span var _ interval.Interface = intervalSpan{} -// ID is part of `interval.Interface` but unused in makeImportSpans. +// ID is part of `interval.Interface` but seemed unused by backupccl usage. func (ie intervalSpan) ID() uintptr { return 0 } // Range is part of `interval.Interface`. @@ -32,224 +28,6 @@ func (ie intervalSpan) Range() interval.Range { return interval.Range{Start: []byte(ie.Key), End: []byte(ie.EndKey)} } -type importEntryType int - -const ( - backupSpan importEntryType = iota - backupFile - tableSpan - completedSpan -) - -type importEntry struct { - roachpb.Span - entryType importEntryType - - // Only set if entryType is backupSpan - start, end hlc.Timestamp - - // Only set if entryType is backupFile - dir roachpb.ExternalStorage - file BackupManifest_File -} - -// makeImportSpans pivots the backups, which are grouped by time, into -// spans for import, which are grouped by keyrange. 
-// -// The core logic of this is in OverlapCoveringMerge, which accepts sets of -// non-overlapping key ranges (aka coverings) each with a payload, and returns -// them aligned with the payloads in the same order as in the input. -// -// Example (input): -// - [A, C) backup t0 to t1 -> /file1 -// - [C, D) backup t0 to t1 -> /file2 -// - [A, B) backup t1 to t2 -> /file3 -// - [B, C) backup t1 to t2 -> /file4 -// - [C, D) backup t1 to t2 -> /file5 -// - [B, D) requested table data to be restored -// -// Example (output): -// - [A, B) -> /file1, /file3 -// - [B, C) -> /file1, /file4, requested (note that file1 was split into two ranges) -// - [C, D) -> /file2, /file5, requested -// -// This would be turned into two Import spans, one restoring [B, C) out of -// /file1 and /file4, the other restoring [C, D) out of /file2 and /file5. -// Nothing is restored out of /file3 and only part of /file1 is used. -// -// NB: All grouping operates in the pre-rewrite keyspace, meaning the keyranges -// as they were backed up, not as they're being restored. -// -// If a span is not covered, the onMissing function is called with the span and -// time missing to determine what error, if any, should be returned. -func makeImportSpans( - tableSpans []roachpb.Span, - backups []BackupManifest, - backupLocalityMap map[int]storeByLocalityKV, - lowWaterMark roachpb.Key, - onMissing func(span covering.Range, start, end hlc.Timestamp) error, -) ([]execinfrapb.RestoreSpanEntry, hlc.Timestamp, error) { - // Put the covering for the already-completed spans into the - // OverlapCoveringMerge input first. Payloads are returned in the same order - // that they appear in the input; putting the completedSpan first means we'll - // see it first when iterating over the output of OverlapCoveringMerge and - // avoid doing unnecessary work. - completedCovering := covering.Covering{ - { - Start: []byte(keys.MinKey), - End: []byte(lowWaterMark), - Payload: importEntry{entryType: completedSpan}, - }, - } - - // Put the merged table data covering into the OverlapCoveringMerge input - // next. - var tableSpanCovering covering.Covering - for _, span := range tableSpans { - tableSpanCovering = append(tableSpanCovering, covering.Range{ - Start: span.Key, - End: span.EndKey, - Payload: importEntry{ - Span: span, - entryType: tableSpan, - }, - }) - } - - backupCoverings := []covering.Covering{completedCovering, tableSpanCovering} - - // Iterate over backups creating two coverings for each. First the spans - // that were backed up, then the files in the backup. The latter is a subset - // when some of the keyranges in the former didn't change since the previous - // backup. These alternate (backup1 spans, backup1 files, backup2 spans, - // backup2 files) so they will retain that alternation in the output of - // OverlapCoveringMerge. 
- var maxEndTime hlc.Timestamp - for i, b := range backups { - if maxEndTime.Less(b.EndTime) { - maxEndTime = b.EndTime - } - - var backupNewSpanCovering covering.Covering - for _, s := range b.IntroducedSpans { - backupNewSpanCovering = append(backupNewSpanCovering, covering.Range{ - Start: s.Key, - End: s.EndKey, - Payload: importEntry{Span: s, entryType: backupSpan, start: hlc.Timestamp{}, end: b.StartTime}, - }) - } - backupCoverings = append(backupCoverings, backupNewSpanCovering) - - var backupSpanCovering covering.Covering - for _, s := range b.Spans { - backupSpanCovering = append(backupSpanCovering, covering.Range{ - Start: s.Key, - End: s.EndKey, - Payload: importEntry{Span: s, entryType: backupSpan, start: b.StartTime, end: b.EndTime}, - }) - } - backupCoverings = append(backupCoverings, backupSpanCovering) - var backupFileCovering covering.Covering - - var storesByLocalityKV map[string]roachpb.ExternalStorage - if storesByLocalityKVMap, ok := backupLocalityMap[i]; ok { - storesByLocalityKV = storesByLocalityKVMap - } - - for _, f := range b.Files { - dir := b.Dir - if storesByLocalityKV != nil { - if newDir, ok := storesByLocalityKV[f.LocalityKV]; ok { - dir = newDir - } - } - backupFileCovering = append(backupFileCovering, covering.Range{ - Start: f.Span.Key, - End: f.Span.EndKey, - Payload: importEntry{ - Span: f.Span, - entryType: backupFile, - dir: dir, - file: f, - }, - }) - } - backupCoverings = append(backupCoverings, backupFileCovering) - } - - // Group ranges covered by backups with ones needed to restore the selected - // tables. Note that this breaks intervals up as necessary to align them. - // See the function godoc for details. - importRanges := covering.OverlapCoveringMerge(backupCoverings) - - // Translate the output of OverlapCoveringMerge into requests. - var requestEntries []execinfrapb.RestoreSpanEntry -rangeLoop: - for _, importRange := range importRanges { - needed := false - var latestCoveredTime hlc.Timestamp - var files []execinfrapb.RestoreFileSpec - payloads := importRange.Payload.([]interface{}) - for _, p := range payloads { - ie := p.(importEntry) - switch ie.entryType { - case completedSpan: - continue rangeLoop - case tableSpan: - needed = true - case backupSpan: - // The latest time we've backed up this span may be ahead of the start - // time of this entry. This is because some spans can be - // "re-introduced", meaning that they were previously backed up but - // still appear in introducedSpans. Spans are re-introduced when they - // were taken OFFLINE (and therefore processed non-transactional writes) - // and brought back online (PUBLIC). For more information see #62564. - if latestCoveredTime.Less(ie.start) { - return nil, hlc.Timestamp{}, errors.Errorf( - "no backup covers time [%s,%s) for range [%s,%s) or backups listed out of order (mismatched start time)", - latestCoveredTime, ie.start, - roachpb.Key(importRange.Start), roachpb.Key(importRange.End)) - } - if !ie.end.Less(latestCoveredTime) { - latestCoveredTime = ie.end - } - case backupFile: - if len(ie.file.Path) > 0 { - files = append(files, execinfrapb.RestoreFileSpec{ - Dir: ie.dir, - Path: ie.file.Path, - }) - } - } - } - if needed { - if latestCoveredTime != maxEndTime { - if err := onMissing(importRange, latestCoveredTime, maxEndTime); err != nil { - return nil, hlc.Timestamp{}, err - } - } - if len(files) == 0 { - // There may be import entries that refer to no data, and hence - // no files. These are caused because file spans start at a - // specific key. E.g. 
consider the first file backing up data - // from table 51. It will cover span ‹/Table/51/1/0/0› - - // ‹/Table/51/1/3273›. When merged with the backup span: - // ‹/Table/51› - ‹/Table/52›, we get an empty span with no - // files: ‹/Table/51› - ‹/Table/51/1/0/0›. We should ignore - // these to avoid thrashing during restore's split and scatter. - continue - } - // If needed is false, we have data backed up that is not necessary - // for this restore. Skip it. - requestEntries = append(requestEntries, execinfrapb.RestoreSpanEntry{ - Span: roachpb.Span{Key: importRange.Start, EndKey: importRange.End}, - Files: files, - }) - } - } - return requestEntries, maxEndTime, nil -} - // makeSimpleImportSpans partitions the spans of requiredSpans into a covering // of RestoreSpanEntry's which each have all overlapping files from the passed // backups assigned to them. The spans of requiredSpans are trimmed/removed @@ -313,18 +91,23 @@ func makeSimpleImportSpans( if cover[i].Span.Overlaps(sp) { cover[i].Files = append(cover[i].Files, fileSpec) } - // Later files start later, so this cover ends before this file, - // it ends before them too and they can start searching after it. + // If partition i of the cover ends before this file starts, we + // know it also ends before any remaining files start too, as the + // files are sorted above by start key, so remaining files can + // start their search after this partition. if cover[i].Span.EndKey.Compare(sp.Key) <= 0 { covPos = i + 1 } } + // If this file extends beyond the end of the last partition of the + // cover, append a new partition for the uncovered span. if covEnd := cover[len(cover)-1].Span.EndKey; sp.EndKey.Compare(covEnd) > 0 { cover = append(cover, makeEntry(covEnd, sp.EndKey, fileSpec)) } } } else if span.EndKey.Compare(f.Span.Key) <= 0 { - // This file is already after the end, so rest are too. + // If this file starts after the needed span ends, then all the files + // remaining do too, so we're done checking files for this span. break } } diff --git a/pkg/ccl/backupccl/restore_span_covering_test.go b/pkg/ccl/backupccl/restore_span_covering_test.go index 5f9401654c67..60d3cefa24ae 100644 --- a/pkg/ccl/backupccl/restore_span_covering_test.go +++ b/pkg/ccl/backupccl/restore_span_covering_test.go @@ -67,6 +67,18 @@ func MockBackupChain(length, spans, baseFiles int, r *rand.Rand) []BackupManifes return backups } +// checkRestoreCovering verifies that a covering actually uses every span of +// every file in the passed backups that overlaps with any part of the passed +// spans. It does so by constructing a map from every file name to a SpanGroup that +// contains the overlap of that file span with every required span, and then +// iterating through the partitions of the cover and removing that partition's +// span from the group for every file specified by that partition, and then +// checking that all the groups are empty, indicating no needed span was missed. +// It also checks that the cover has an expected number of +// partitions (i.e. isn't just one big partition of all files), by comparing its +// length to the number of files whose end key was greater than any prior end +// key when walking files in order by start key in the backups. This check is +// thus sensitive to ordering; the coverage correctness check, however, is not.
func checkRestoreCovering( backups []BackupManifest, spans roachpb.Spans, cov []execinfrapb.RestoreSpanEntry, ) error { diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go index aabe1637299a..21d7780fa28b 100644 --- a/pkg/ccl/backupccl/show.go +++ b/pkg/ccl/backupccl/show.go @@ -35,6 +35,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" @@ -67,6 +68,7 @@ func checkShowBackupURIPrivileges(ctx context.Context, p sql.PlanHookState, uri type backupInfoReader interface { showBackup( context.Context, + *mon.BoundAccount, cloud.ExternalStorage, cloud.ExternalStorage, *jobspb.BackupEncryptionOptions, @@ -91,15 +93,21 @@ func (m manifestInfoReader) header() colinfo.ResultColumns { // and pipes the information to the user's sql console via the results channel. func (m manifestInfoReader) showBackup( ctx context.Context, + mem *mon.BoundAccount, store cloud.ExternalStorage, incStore cloud.ExternalStorage, enc *jobspb.BackupEncryptionOptions, incPaths []string, resultsCh chan<- tree.Datums, ) error { + var memSize int64 + defer func() { + mem.Shrink(ctx, memSize) + }() + var err error manifests := make([]BackupManifest, len(incPaths)+1) - manifests[0], err = ReadBackupManifestFromStore(ctx, store, enc) + manifests[0], memSize, err = ReadBackupManifestFromStore(ctx, mem, store, enc) if err != nil { if errors.Is(err, cloud.ErrFileDoesNotExist) { @@ -117,10 +125,11 @@ func (m manifestInfoReader) showBackup( } for i := range incPaths { - m, err := readBackupManifest(ctx, incStore, incPaths[i], enc) + m, sz, err := readBackupManifest(ctx, mem, incStore, incPaths[i], enc) if err != nil { return err } + memSize += sz // Blank the stats to prevent memory blowup. 
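The verification strategy described in the `checkRestoreCovering` comment above can be expressed roughly as follows: build a map from file name to the SpanGroup of required keyspace that the file overlaps, then subtract every cover partition that lists that file and require that nothing remains. The sketch below illustrates the idea and is not the test's actual body.

```
package backupccl

import (
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/errors"
)

// sketchCheckCovering records, per file, the required keyspace that file
// overlaps, then subtracts every cover partition that lists the file; any
// leftover span means the cover dropped data it should have restored.
func sketchCheckCovering(
	backups []BackupManifest, required []roachpb.Span, cov []execinfrapb.RestoreSpanEntry,
) error {
	needed := make(map[string]*roachpb.SpanGroup)
	for _, b := range backups {
		for _, f := range b.Files {
			for _, sp := range required {
				if f.Span.Overlaps(sp) {
					g, ok := needed[f.Path]
					if !ok {
						g = &roachpb.SpanGroup{}
						needed[f.Path] = g
					}
					g.Add(intersect(f.Span, sp))
				}
			}
		}
	}
	for _, part := range cov {
		for _, f := range part.Files {
			if g, ok := needed[f.Path]; ok {
				g.Sub(part.Span)
			}
		}
	}
	for path, g := range needed {
		if leftover := g.Slice(); len(leftover) > 0 {
			return errors.Newf("file %s: needed spans %v not assigned to any partition", path, leftover)
		}
	}
	return nil
}

// intersect returns the overlap of two spans that are known to overlap.
func intersect(a, b roachpb.Span) roachpb.Span {
	r := a
	if b.Key.Compare(r.Key) > 0 {
		r.Key = b.Key
	}
	if b.EndKey.Compare(r.EndKey) < 0 {
		r.EndKey = b.EndKey
	}
	return r
}
```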
m.DeprecatedStatistics = nil manifests[i+1] = m @@ -315,7 +324,10 @@ func showBackupPlanHook( } } - return infoReader.showBackup(ctx, store, incStore, encryption, incPaths, resultsCh) + mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount() + defer mem.Close(ctx) + + return infoReader.showBackup(ctx, &mem, store, incStore, encryption, incPaths, resultsCh) } return fn, infoReader.header(), nil, false, nil diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 080227164883..3041a48c77ce 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -165,8 +165,8 @@ ORDER BY object_type, object_name`, full) details1Desc := catalogkv.TestingGetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "data", "details1") details2Desc := catalogkv.TestingGetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "data", "details2") - details1Key := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, details1Desc, details1Desc.GetPrimaryIndexID())) - details2Key := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, details2Desc, details2Desc.GetPrimaryIndexID())) + details1Key := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, details1Desc.GetID(), details1Desc.GetPrimaryIndexID())) + details2Key := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, details2Desc.GetID(), details2Desc.GetPrimaryIndexID())) sqlDBRestore.CheckQueryResults(t, fmt.Sprintf(`SHOW BACKUP RANGES '%s'`, details), [][]string{ {"/Table/64/1", "/Table/64/2", string(details1Key), string(details1Key.PrefixEnd())}, diff --git a/pkg/ccl/backupccl/split_and_scatter_processor.go b/pkg/ccl/backupccl/split_and_scatter_processor.go index 349278b6e62a..7bca48d762bb 100644 --- a/pkg/ccl/backupccl/split_and_scatter_processor.go +++ b/pkg/ccl/backupccl/split_and_scatter_processor.go @@ -427,7 +427,7 @@ func routingDatumsForNode(nodeID roachpb.NodeID) (rowenc.EncDatum, rowenc.EncDat // routingSpanForNode provides the mapping to be used during distsql planning // when setting up the output router. 
func routingSpanForNode(nodeID roachpb.NodeID) ([]byte, []byte, error) { - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc startDatum, endDatum := routingDatumsForNode(nodeID) startBytes, endBytes := make([]byte, 0), make([]byte, 0) diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 3f78784e68ae..071c045d7500 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" @@ -443,15 +444,16 @@ func MakeBackupTableEntry( tablePrimaryIndexSpan := tbDesc.PrimaryIndexSpan(backupCodec) - entry, _, err := makeImportSpans( + if err := checkCoverage(ctx, []roachpb.Span{tablePrimaryIndexSpan}, backupManifests); err != nil { + return BackupTableEntry{}, errors.Wrapf(err, "making spans for table %s", fullyQualifiedTableName) + } + + entry := makeSimpleImportSpans( []roachpb.Span{tablePrimaryIndexSpan}, backupManifests, nil, /*backupLocalityInfo*/ roachpb.Key{}, /*lowWaterMark*/ - errOnMissingRange) - if err != nil { - return BackupTableEntry{}, errors.Wrapf(err, "making spans for table %s", fullyQualifiedTableName) - } + ) lastSchemaChangeTime := findLastSchemaChangeTime(backupManifests, tbDesc, endTime) @@ -535,7 +537,7 @@ func checkMultiRegionCompatible( if table.IsLocalityRegionalByTable() { regionName, _ := table.GetRegionalByTableRegion() - if regionName == descpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { + if regionName == catpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { // REGIONAL BY PRIMARY REGION tables are allowed since they do not // reference a particular region. 
return nil diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index dd5d3ae98e84..372969c5e1d5 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -105,7 +105,7 @@ go_library( "//pkg/util/timeutil", "//pkg/util/tracing", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", "@com_github_google_btree//:btree", @@ -177,7 +177,6 @@ go_test( "//pkg/server/status", "//pkg/server/telemetry", "//pkg/settings/cluster", - "//pkg/spanconfig", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", @@ -223,7 +222,7 @@ go_test( "//pkg/workload/bank", "//pkg/workload/ledger", "//pkg/workload/workloadsql", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_cockroach_go_v2//crdb", "@com_github_cockroachdb_errors//:errors", "@com_github_dustin_go_humanize//:go-humanize", diff --git a/pkg/ccl/changefeedccl/avro.go b/pkg/ccl/changefeedccl/avro.go index 3f5a47cae895..44c63c94214f 100644 --- a/pkg/ccl/changefeedccl/avro.go +++ b/pkg/ccl/changefeedccl/avro.go @@ -13,7 +13,7 @@ import ( "math/big" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -160,7 +160,7 @@ type avroDataRecord struct { // Allocate Go native representation once, to avoid repeated map allocation // when encoding. native map[string]interface{} - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc } // avroMetadata is the `avroEnvelopeRecord` metadata. @@ -1015,12 +1015,13 @@ func decimalToRat(dec apd.Decimal, scale int32) (big.Rat, error) { if dec.Exponent >= 0 { exp := big.NewInt(10) exp = exp.Exp(exp, big.NewInt(int64(dec.Exponent)), nil) - var coeff big.Int - r.SetFrac(coeff.Mul(&dec.Coeff, exp), big.NewInt(1)) + coeff := dec.Coeff.MathBigInt() + r.SetFrac(coeff.Mul(coeff, exp), big.NewInt(1)) } else { exp := big.NewInt(10) exp = exp.Exp(exp, big.NewInt(int64(-dec.Exponent)), nil) - r.SetFrac(&dec.Coeff, exp) + coeff := dec.Coeff.MathBigInt() + r.SetFrac(coeff, exp) } if dec.Negative { r.Mul(&r, big.NewRat(-1, 1)) @@ -1036,7 +1037,8 @@ func ratToDecimal(rat big.Rat, scale int32) apd.Decimal { exp := big.NewInt(10) exp = exp.Exp(exp, big.NewInt(int64(scale)), nil) sf := denom.Div(exp, denom) - coeff := num.Mul(num, sf) - dec := apd.NewWithBigInt(coeff, -scale) + var coeff apd.BigInt + coeff.SetMathBigInt(num.Mul(num, sf)) + dec := apd.NewWithBigInt(&coeff, -scale) return *dec } diff --git a/pkg/ccl/changefeedccl/avro_test.go b/pkg/ccl/changefeedccl/avro_test.go index 2e847336e819..a51471c71919 100644 --- a/pkg/ccl/changefeedccl/avro_test.go +++ b/pkg/ccl/changefeedccl/avro_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/ccl/importccl" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/settings/cluster" diff --git a/pkg/ccl/changefeedccl/cdctest/mock_webhook_sink.go b/pkg/ccl/changefeedccl/cdctest/mock_webhook_sink.go index 279a0c7f10c7..a7e26082319f 100644 --- a/pkg/ccl/changefeedccl/cdctest/mock_webhook_sink.go +++ b/pkg/ccl/changefeedccl/cdctest/mock_webhook_sink.go @@ -52,6 +52,23 @@ func StartMockWebhookSink(certificate *tls.Certificate) (*MockWebhookSink, error 
return s, nil } +// StartMockWebhookSinkSecure creates and starts a mock webhook sink server that +// requires clients to provide client certificates for authentication +func StartMockWebhookSinkSecure(certificate *tls.Certificate) (*MockWebhookSink, error) { + s := makeMockWebhookSink() + if certificate == nil { + return nil, errors.Errorf("Must pass a CA cert when creating a mock webhook sink.") + } + + s.server.TLS = &tls.Config{ + Certificates: []tls.Certificate{*certificate}, + ClientAuth: tls.RequireAnyClientCert, + } + + s.server.StartTLS() + return s, nil +} + // StartMockWebhookSinkWithBasicAuth creates and starts a mock webhook sink for // tests with basic username/password auth. func StartMockWebhookSinkWithBasicAuth( diff --git a/pkg/ccl/changefeedccl/cdctest/tls_util.go b/pkg/ccl/changefeedccl/cdctest/tls_util.go index a2769869462f..7608eab2684b 100644 --- a/pkg/ccl/changefeedccl/cdctest/tls_util.go +++ b/pkg/ccl/changefeedccl/cdctest/tls_util.go @@ -9,6 +9,7 @@ package cdctest import ( + "bytes" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -118,6 +119,55 @@ func PemEncodeCert(cert []byte) (string, error) { return pemEncode("CERTIFICATE", cert) } +// GenerateClientCertAndKey generates a client certificate and client key that +// is signed by the given caCert +func GenerateClientCertAndKey(caCert *tls.Certificate) ([]byte, []byte, error) { + clientCert := &x509.Certificate{ + SerialNumber: big.NewInt(1658), + IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}, + NotBefore: timeutil.Now(), + NotAfter: timeutil.Now().Add(certLifetime), + SubjectKeyId: []byte{1, 2, 3, 4, 6}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature, + } + + clientKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, nil, err + } + + cert, err := x509.ParseCertificate(caCert.Certificate[0]) + if err != nil { + return nil, nil, err + } + + clientCertBytes, err := x509.CreateCertificate(rand.Reader, clientCert, cert, &clientKey.PublicKey, caCert.PrivateKey) + if err != nil { + return nil, nil, err + } + + clientCertPEM := new(bytes.Buffer) + err = pem.Encode(clientCertPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: clientCertBytes, + }) + if err != nil { + return nil, nil, err + } + + clientKeyPEM := new(bytes.Buffer) + err = pem.Encode(clientKeyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(clientKey), + }) + if err != nil { + return nil, nil, err + } + + return clientCertPEM.Bytes(), clientKeyPEM.Bytes(), nil +} + func randomSerial() (*big.Int, error) { limit := new(big.Int).Lsh(big.NewInt(1), 128) ret, err := rand.Int(rand.Reader, limit) diff --git a/pkg/ccl/changefeedccl/changefeed_processors.go b/pkg/ccl/changefeedccl/changefeed_processors.go index b6b6f88d1a5e..6410d33eaa86 100644 --- a/pkg/ccl/changefeedccl/changefeed_processors.go +++ b/pkg/ccl/changefeedccl/changefeed_processors.go @@ -884,7 +884,7 @@ type changeFrontier struct { flowCtx *execinfra.FlowCtx spec execinfrapb.ChangeFrontierSpec memAcc mon.BoundAccount - a rowenc.DatumAlloc + a tree.DatumAlloc // input returns rows from one or more changeAggregator processors input execinfra.RowSource diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 1d7aacd554d8..925add4ea42a 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -3332,6 +3332,16 @@ func TestChangefeedErrors(t *testing.T) { `CREATE 
CHANGEFEED FOR foo INTO $1 WITH updated, webhook_sink_config='{"Retry":{"Max":"inf"}}'`, `webhook-https://fake-host`, ) + sqlDB.ExpectErr( + t, `client_cert requires client_key to be set`, + `CREATE CHANGEFEED FOR foo INTO $1`, + `webhook-https://fake-host?client_cert=Zm9v`, + ) + sqlDB.ExpectErr( + t, `client_key requires client_cert to be set`, + `CREATE CHANGEFEED FOR foo INTO $1`, + `webhook-https://fake-host?client_key=Zm9v`, + ) // Sanity check on_error option sqlDB.ExpectErr( @@ -3578,8 +3588,8 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { defer close(done) sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 1`) - sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 1`) + sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`) + sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) diff --git a/pkg/ccl/changefeedccl/encoder.go b/pkg/ccl/changefeedccl/encoder.go index e6f4f5c630a5..c9a148c959e8 100644 --- a/pkg/ccl/changefeedccl/encoder.go +++ b/pkg/ccl/changefeedccl/encoder.go @@ -103,7 +103,7 @@ type jsonEncoder struct { updatedField, mvccTimestampField, beforeField, wrapped, keyOnly, keyInValue, topicInValue bool targets jobspb.ChangefeedTargets - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc buf bytes.Buffer } diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 8d1d32961ab4..09bfaa47eb77 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest" @@ -34,7 +34,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" - "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -329,7 +328,6 @@ func startTestFullServer( knobs := base.TestingKnobs{ DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}, JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - SpanConfig: &spanconfig.TestingKnobs{ManagerDisableJobCreation: true}, } if options.knobsFn != nil { options.knobsFn(&knobs) @@ -412,7 +410,7 @@ func startTestTenant( ) (serverutils.TestServerInterface, *gosql.DB, func()) { ctx := context.Background() - kvServer, _, cleanup := startTestFullServer(t, options) + kvServer, _, cleanupCluster := startTestFullServer(t, options) knobs := base.TestingKnobs{ DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}, JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -439,7 +437,12 @@ func startTestTenant( // Log so that it is clear if a failed test happened // to run on a tenant. 
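The two new error cases above exercise the `client_cert` and `client_key` sink parameters, which travel as base64-encoded PEM in the webhook URI's query string (`Zm9v` is just base64 for `foo`). A small standard-library sketch of assembling such a URI, with placeholder PEM bytes:

```
package changefeedccl

import (
	"encoding/base64"
	"net/url"
)

// webhookURIWithClientCert attaches base64-encoded client TLS material to a
// webhook sink URI. certPEM and keyPEM stand in for real PEM-encoded bytes.
func webhookURIWithClientCert(base string, certPEM, keyPEM []byte) (string, error) {
	u, err := url.Parse(base) // e.g. "webhook-https://host:port/path"
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("client_cert", base64.StdEncoding.EncodeToString(certPEM))
	q.Set("client_key", base64.StdEncoding.EncodeToString(keyPEM))
	u.RawQuery = q.Encode()
	return u.String(), nil
}
```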
t.Logf("Running test using tenant %s", tenantID) - return server, tenantDB, cleanup + return server, tenantDB, func() { + tenantServer.Stopper().Stop(context.Background()) + log.Infof(context.Background(), "tenant server stopped") + cleanupCluster() + log.Infof(context.Background(), "cluster shut down") + } } type cdcTestFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory) diff --git a/pkg/ccl/changefeedccl/kvevent/BUILD.bazel b/pkg/ccl/changefeedccl/kvevent/BUILD.bazel index 047865f58226..b8c203b795ea 100644 --- a/pkg/ccl/changefeedccl/kvevent/BUILD.bazel +++ b/pkg/ccl/changefeedccl/kvevent/BUILD.bazel @@ -43,7 +43,7 @@ go_test( "//pkg/roachpb:with-mocks", "//pkg/settings/cluster", "//pkg/sql/randgen", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/types", "//pkg/util/ctxgroup", "//pkg/util/encoding", diff --git a/pkg/ccl/changefeedccl/kvevent/blocking_buffer_test.go b/pkg/ccl/changefeedccl/kvevent/blocking_buffer_test.go index 23aa40db800f..55a691f197d3 100644 --- a/pkg/ccl/changefeedccl/kvevent/blocking_buffer_test.go +++ b/pkg/ccl/changefeedccl/kvevent/blocking_buffer_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -35,7 +35,7 @@ import ( func makeKV(t *testing.T, rnd *rand.Rand) roachpb.KeyValue { const tableID = 42 - key, err := rowenc.EncodeTableKey( + key, err := keyside.Encode( keys.SystemSQLCodec.TablePrefix(tableID), randgen.RandDatumSimple(rnd, types.String), encoding.Ascending, diff --git a/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel b/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel index f64659ec6a92..bf7e2573e466 100644 --- a/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel +++ b/pkg/ccl/changefeedccl/kvfeed/BUILD.bazel @@ -64,7 +64,7 @@ go_test( "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/descpb", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go b/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go index 08f9ae0a55e2..d03620ecb1c6 100644 --- a/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go +++ b/pkg/ccl/changefeedccl/kvfeed/kv_feed_test.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -46,7 +46,7 @@ func TestKVFeed(t *testing.T) { mkKey := func(tableID uint32, k string) roachpb.Key { vDatum := tree.DString(k) - key, err := rowenc.EncodeTableKey(keys.SystemSQLCodec.TablePrefix(tableID), &vDatum, encoding.Ascending) + key, err := keyside.Encode(keys.SystemSQLCodec.TablePrefix(tableID), &vDatum, encoding.Ascending) require.NoError(t, err) return key } diff --git a/pkg/ccl/changefeedccl/rowfetcher_cache.go b/pkg/ccl/changefeedccl/rowfetcher_cache.go index 57a3cde2f889..744b2cd00366 100644 --- 
a/pkg/ccl/changefeedccl/rowfetcher_cache.go +++ b/pkg/ccl/changefeedccl/rowfetcher_cache.go @@ -41,7 +41,7 @@ type rowFetcherCache struct { collection *descs.Collection db *kv.DB - a rowenc.DatumAlloc + a tree.DatumAlloc } var rfCacheConfig = cache.Config{ @@ -177,8 +177,7 @@ func (c *rowFetcherCache) RowFetcherForTableDesc( false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ &c.a, nil, /* memMonitor */ rfArgs, diff --git a/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel b/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel index a73520490d16..7832057de020 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel +++ b/pkg/ccl/changefeedccl/schemafeed/schematestutils/BUILD.bazel @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/types", diff --git a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go index 8bc5efd5a326..22729093bb0e 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go +++ b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go @@ -14,6 +14,7 @@ import ( "strconv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -59,9 +60,9 @@ func MakeColumnDesc(id descpb.ColumnID) *descpb.ColumnDescriptor { // SetLocalityRegionalByRow sets the LocalityConfig of the table // descriptor such that desc.IsLocalityRegionalByRow will return true. 
func SetLocalityRegionalByRow(desc catalog.TableDescriptor) catalog.TableDescriptor { - desc.TableDesc().LocalityConfig = &descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByRow_{ - RegionalByRow: &descpb.TableDescriptor_LocalityConfig_RegionalByRow{}, + desc.TableDesc().LocalityConfig = &catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByRow_{ + RegionalByRow: &catpb.LocalityConfig_RegionalByRow{}, }, } return tabledesc.NewBuilder(desc.TableDesc()).BuildImmutableTable() diff --git a/pkg/ccl/changefeedccl/sink.go b/pkg/ccl/changefeedccl/sink.go index 2e91f1e27a36..fd9c0b1c4740 100644 --- a/pkg/ccl/changefeedccl/sink.go +++ b/pkg/ccl/changefeedccl/sink.go @@ -300,7 +300,7 @@ func (b *encDatumRowBuffer) Pop() rowenc.EncDatumRow { type bufferSink struct { buf encDatumRowBuffer - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc scratch bufalloc.ByteAllocator closed bool metrics *sliMetrics diff --git a/pkg/ccl/changefeedccl/sink_webhook.go b/pkg/ccl/changefeedccl/sink_webhook.go index 538dbebfe1a3..0a9dc729b5d4 100644 --- a/pkg/ccl/changefeedccl/sink_webhook.go +++ b/pkg/ccl/changefeedccl/sink_webhook.go @@ -323,6 +323,8 @@ func makeWebhookSink( params := sinkURLParsed.Query() params.Del(changefeedbase.SinkParamSkipTLSVerify) params.Del(changefeedbase.SinkParamCACert) + params.Del(changefeedbase.SinkParamClientCert) + params.Del(changefeedbase.SinkParamClientKey) sinkURLParsed.RawQuery = params.Encode() sink.url = sinkURL{URL: sinkURLParsed} @@ -354,6 +356,12 @@ func makeWebhookClient(u sinkURL, timeout time.Duration) (*httputil.Client, erro if err := u.decodeBase64(changefeedbase.SinkParamCACert, &dialConfig.caCert); err != nil { return nil, err } + if err := u.decodeBase64(changefeedbase.SinkParamClientCert, &dialConfig.clientCert); err != nil { + return nil, err + } + if err := u.decodeBase64(changefeedbase.SinkParamClientKey, &dialConfig.clientKey); err != nil { + return nil, err + } transport.TLSClientConfig = &tls.Config{ InsecureSkipVerify: dialConfig.tlsSkipVerify, @@ -373,6 +381,20 @@ func makeWebhookClient(u sinkURL, timeout time.Duration) (*httputil.Client, erro transport.TLSClientConfig.RootCAs = caCertPool } + if dialConfig.clientCert != nil && dialConfig.clientKey == nil { + return nil, errors.Errorf(`%s requires %s to be set`, changefeedbase.SinkParamClientCert, changefeedbase.SinkParamClientKey) + } else if dialConfig.clientKey != nil && dialConfig.clientCert == nil { + return nil, errors.Errorf(`%s requires %s to be set`, changefeedbase.SinkParamClientKey, changefeedbase.SinkParamClientCert) + } + + if dialConfig.clientCert != nil && dialConfig.clientKey != nil { + cert, err := tls.X509KeyPair(dialConfig.clientCert, dialConfig.clientKey) + if err != nil { + return nil, errors.Wrap(err, `invalid client certificate data provided`) + } + transport.TLSClientConfig.Certificates = []tls.Certificate{cert} + } + return client, nil } diff --git a/pkg/ccl/changefeedccl/sink_webhook_test.go b/pkg/ccl/changefeedccl/sink_webhook_test.go index 20446356ef0b..98a0a396f164 100644 --- a/pkg/ccl/changefeedccl/sink_webhook_test.go +++ b/pkg/ccl/changefeedccl/sink_webhook_test.go @@ -10,6 +10,7 @@ package changefeedccl import ( "context" + "encoding/base64" "fmt" "net/http" "net/url" @@ -187,11 +188,38 @@ func TestWebhookSink(t *testing.T) { require.EqualError(t, sinkSrcWrongProtocol.EmitRow(context.Background(), nil, nil, nil, zeroTS, zeroTS, zeroAlloc), `context canceled`) + sinkDestSecure, err := 
cdctest.StartMockWebhookSinkSecure(cert) + require.NoError(t, err) + + sinkDestSecureHost, err := url.Parse(sinkDestSecure.URL()) + require.NoError(t, err) + + clientCertPEM, clientKeyPEM, err := cdctest.GenerateClientCertAndKey(cert) + require.NoError(t, err) + + params = sinkDestSecureHost.Query() + params.Set(changefeedbase.SinkParamSkipTLSVerify, "true") + params.Set(changefeedbase.SinkParamClientCert, base64.StdEncoding.EncodeToString(clientCertPEM)) + params.Set(changefeedbase.SinkParamClientKey, base64.StdEncoding.EncodeToString(clientKeyPEM)) + sinkDestSecureHost.RawQuery = params.Encode() + + details = jobspb.ChangefeedDetails{ + SinkURI: fmt.Sprintf("webhook-%s", sinkDestSecureHost.String()), + Opts: opts, + } + + sinkSrc, err = setupWebhookSinkWithDetails(context.Background(), details, parallelism, timeutil.DefaultTimeSource{}) + require.NoError(t, err) + + // sink with client accepting server cert should pass + testSendAndReceiveRows(t, sinkSrc, sinkDestSecure) + require.NoError(t, sinkSrc.Close()) require.NoError(t, sinkSrcNoCert.Close()) require.NoError(t, sinkSrcInsecure.Close()) require.NoError(t, sinkSrcWrongProtocol.Close()) sinkDestHTTP.Close() + sinkDestSecure.Close() } // run tests with parallelism from 1-4 diff --git a/pkg/ccl/changefeedccl/testfeed_test.go b/pkg/ccl/changefeedccl/testfeed_test.go index 4081ca8829b8..ed69c2e6d3e8 100644 --- a/pkg/ccl/changefeedccl/testfeed_test.go +++ b/pkg/ccl/changefeedccl/testfeed_test.go @@ -13,9 +13,11 @@ import ( "bytes" "context" gosql "database/sql" + "encoding/base64" gojson "encoding/json" "fmt" "io/ioutil" + "math/rand" "net/url" "os" "path/filepath" @@ -1262,6 +1264,7 @@ func (k *kafkaFeed) Close() error { type webhookFeedFactory struct { enterpriseFeedFactory + useSecureServer bool } var _ cdctest.TestFeedFactory = (*webhookFeedFactory)(nil) @@ -1270,12 +1273,14 @@ var _ cdctest.TestFeedFactory = (*webhookFeedFactory)(nil) func makeWebhookFeedFactory( srv serverutils.TestServerInterface, db *gosql.DB, ) cdctest.TestFeedFactory { + useSecure := rand.Float32() < 0.5 return &webhookFeedFactory{ enterpriseFeedFactory: enterpriseFeedFactory{ s: srv, db: db, di: newDepInjector(srv), }, + useSecureServer: useSecure, } } @@ -1286,19 +1291,40 @@ func (f *webhookFeedFactory) Feed(create string, args ...interface{}) (cdctest.T } createStmt := parsed.AST.(*tree.CreateChangefeed) + var sinkDest *cdctest.MockWebhookSink + cert, _, err := cdctest.NewCACertBase64Encoded() if err != nil { return nil, err } - sinkDest, err := cdctest.StartMockWebhookSink(cert) - if err != nil { - return nil, err - } - if createStmt.SinkURI == nil { - createStmt.SinkURI = tree.NewStrVal( - fmt.Sprintf("webhook-%s?insecure_tls_skip_verify=true", sinkDest.URL())) + if f.useSecureServer { + sinkDest, err = cdctest.StartMockWebhookSinkSecure(cert) + if err != nil { + return nil, err + } + + clientCertPEM, clientKeyPEM, err := cdctest.GenerateClientCertAndKey(cert) + if err != nil { + return nil, err + } + + if createStmt.SinkURI == nil { + createStmt.SinkURI = tree.NewStrVal( + fmt.Sprintf("webhook-%s?insecure_tls_skip_verify=true&client_cert=%s&client_key=%s", sinkDest.URL(), base64.StdEncoding.EncodeToString(clientCertPEM), base64.StdEncoding.EncodeToString(clientKeyPEM))) + } + } else { + sinkDest, err = cdctest.StartMockWebhookSink(cert) + if err != nil { + return nil, err + } + + if createStmt.SinkURI == nil { + createStmt.SinkURI = tree.NewStrVal( + fmt.Sprintf("webhook-%s?insecure_tls_skip_verify=true", sinkDest.URL())) + } } + ss := &sinkSynchronizer{} 
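On the consuming side, `makeWebhookClient` (earlier in this diff) turns those parameters back into a client certificate via `tls.X509KeyPair`. A stripped-down, standard-library-only sketch of that wiring, assuming the PEM bytes have already been base64-decoded:

```
package changefeedccl

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"net/http"
)

// newMTLSClient builds an HTTP client that presents a client certificate and
// trusts a custom CA, mirroring the webhook sink's TLS configuration.
func newMTLSClient(caPEM, clientCertPEM, clientKeyPEM []byte) (*http.Client, error) {
	cert, err := tls.X509KeyPair(clientCertPEM, clientKeyPEM)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, errors.New("invalid CA PEM")
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      pool,
			},
		},
	}, nil
}
```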
wrapSink := func(s Sink) Sink { return ¬ifyFlushSink{Sink: s, sync: ss} diff --git a/pkg/ccl/cliccl/BUILD.bazel b/pkg/ccl/cliccl/BUILD.bazel index e5a5247308fa..5dfb4dde1d2f 100644 --- a/pkg/ccl/cliccl/BUILD.bazel +++ b/pkg/ccl/cliccl/BUILD.bazel @@ -39,7 +39,6 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/row", - "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/storage", "//pkg/storage/enginepb", @@ -53,7 +52,7 @@ go_library( "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_spf13_cobra//:cobra", diff --git a/pkg/ccl/cliccl/debug_backup.go b/pkg/ccl/cliccl/debug_backup.go index 2faba7f5fe78..ceb2e101c7c9 100644 --- a/pkg/ccl/cliccl/debug_backup.go +++ b/pkg/ccl/cliccl/debug_backup.go @@ -22,7 +22,7 @@ import ( "strings" "time" - apd "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/ccl/backupccl" @@ -44,7 +44,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util" @@ -295,7 +294,7 @@ func getManifestFromURI(ctx context.Context, path string) (backupccl.BackupManif // upgraded from the old FK representation, or even older formats). If more // fields are added to the output, the table descriptors may need to be // upgraded. 
- backupManifest, err := backupccl.ReadBackupManifestFromURI(ctx, path, security.RootUserName(), + backupManifest, _, err := backupccl.ReadBackupManifestFromURI(ctx, nil /* mem */, path, security.RootUserName(), externalStorageFromURIFactory, nil) if err != nil { return backupccl.BackupManifest{}, err @@ -390,7 +389,7 @@ func runListIncrementalCmd(cmd *cobra.Command, args []string) error { defer stores[i].Close() } - manifest, err := backupccl.ReadBackupManifestFromStore(ctx, stores[i], nil) + manifest, _, err := backupccl.ReadBackupManifestFromStore(ctx, nil /* mem */, stores[i], nil) if err != nil { return err } @@ -601,9 +600,8 @@ func makeRowFetcher( false, /*reverse*/ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /*isCheck*/ - &rowenc.DatumAlloc{}, + 0, /* lockTimeout */ + &tree.DatumAlloc{}, nil, /*mon.BytesMonitor*/ table, ); err != nil { diff --git a/pkg/ccl/importccl/BUILD.bazel b/pkg/ccl/importccl/BUILD.bazel index c70975e7f73b..e46d8ab94c2c 100644 --- a/pkg/ccl/importccl/BUILD.bazel +++ b/pkg/ccl/importccl/BUILD.bazel @@ -45,6 +45,7 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", @@ -91,7 +92,7 @@ go_library( "//pkg/util/timeutil/pgdate", "//pkg/util/tracing", "//pkg/workload", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", "@com_github_fraugster_parquet_go//:parquet-go", diff --git a/pkg/ccl/importccl/exportcsv.go b/pkg/ccl/importccl/exportcsv.go index 708fd8b546bb..d050cd320d30 100644 --- a/pkg/ccl/importccl/exportcsv.go +++ b/pkg/ccl/importccl/exportcsv.go @@ -181,7 +181,7 @@ func (sp *csvWriter) Run(ctx context.Context) { sp.input.Start(ctx) input := execinfra.MakeNoMetadataRowSource(sp.input, sp.output) - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} writer := newCSVExporter(sp.spec) diff --git a/pkg/ccl/importccl/exportcsv_test.go b/pkg/ccl/importccl/exportcsv_test.go index 2b7e24838f85..6809ec2c525b 100644 --- a/pkg/ccl/importccl/exportcsv_test.go +++ b/pkg/ccl/importccl/exportcsv_test.go @@ -273,7 +273,13 @@ func TestExportOrder(t *testing.T) { // fields below name and stmt validate some aspect of the exported parquet file. // If a validation field is empty, then that field will not be used in the test. type parquetTest struct { + // name is the name of the test. name string + + // prep contains sql commands that will execute before the stmt. + prep []string + + // stmt contains the EXPORT PARQUET statement. stmt string // colNames provides the expected column names for the parquet file. @@ -287,8 +293,8 @@ type parquetTest struct { vals [][]interface{} } -// validateParquetFile reads the parquet file, converts each value in the parquet file to its -// native go type, and asserts its values match the truth. 
+// validateParquetFile reads the parquet file and runs a check for each +// non-nil field in the parquetTest struct. func validateParquetFile(t *testing.T, file string, test parquetTest) error { r, err := os.Open(file) if err != nil { @@ -314,12 +320,9 @@ func validateParquetFile(t *testing.T, file string, test parquetTest) error { require.Equal(t, *col.SchemaElement.RepetitionType, test.colFieldRepType[i]) } } - if test.vals != nil { - require.Equal(t, len(cols), len(test.vals[0])) require.Equal(t, int(fr.NumRows()), len(test.vals)) - count := 0 for { row, err := fr.NextRow() @@ -333,38 +336,70 @@ func validateParquetFile(t *testing.T, file string, test parquetTest) error { t.Logf("\n Record %v:", count) for i := 0; i < len(cols); i++ { if test.vals[count][i] == nil { - // If we expect a null value, the row created by the parquet reader will not have the - // associated column. + // If we expect a null value, the row created by the parquet reader + // will not have the associated column. _, ok := row[cols[i].SchemaElement.Name] require.Equal(t, ok, false) continue } - var decodedV interface{} v := row[cols[i].SchemaElement.Name] - switch vv := v.(type) { - case []byte: - // the parquet exporter encodes native go strings as []byte, so an extra - // step is required here - // TODO (MB): as we add more type support, this - // test will be insufficient: many go native types are encoded as - // []byte, so in the future, each column will have to call it's own - // custom decoder ( this is how IMPORT Parquet will work) - decodedV = string(vv) - case int64, float64, bool: - decodedV = vv - default: - t.Fatalf("unexepected type: %T", vv) - } + decodedV := decodeEl(t, v) t.Logf("\t %v", decodedV) require.Equal(t, test.vals[count][i], decodedV) } count++ } } - return nil } +func decodeEl(t *testing.T, v interface{}) interface{} { + var decodedV interface{} + switch vv := v.(type) { + case []byte: + // The parquet exporter encodes native go strings as []byte, so an extra + // step is required here. + + // TODO (MB): as we add more type support, this switch statement will be + // insufficient: many go native types are encoded as []byte, so in the + // future, each column will have to call its own custom decoder (this is + // how IMPORT Parquet will work). + decodedV = string(vv) + case int64, float64, bool: + decodedV = vv + case map[string]interface{}: + var arrDecodedV []interface{} + b := vv["list"].([]map[string]interface{}) + + // Array values are stored in an array of maps: []map[string]interface{}, + // where the ith map contains a single key-value pair. The key is always "element" + // and the value is the ith value in the array. + + // If the array of maps only contains an empty map, the array is empty. This + // occurs IFF "element" is not in the map. + + // NB: there's a bug in the fraugster-parquet vendor library around reading + // an ARRAY[NULL]. See the "arrays" test case for more info. Ideally, once + // the bug gets fixed, ARRAY[NULL] will get read as the kvp {"element":interface{}} while + // ARRAY[] will continue to get read as an empty map.
+ if _, nonEmpty := b[0]["element"]; !nonEmpty { + arrDecodedV = []interface{}{} + if len(b) > 1 { + t.Fatalf("array is empty, it shouldn't have a length greater than 1") + } + } else { + // For non-empty arrays, decode each element. + for _, elMap := range b { + arrDecodedV = append(arrDecodedV, decodeEl(t, elMap["element"])) + } + } + decodedV = arrDecodedV + default: + t.Fatalf("unexpected type: %T", vv) + } + return decodedV +} + // TestBasicParquetTypes exports a relation with bool, int, float and string // values to a parquet file, and then asserts that the parquet exporter properly // encoded the values of the crdb relation. @@ -416,10 +451,36 @@ false),(3, 'Carl', 1, 34.214,true),(4, 'Alex', 3, 14.3, NULL), (5, 'Bobby', 2, 3 parquet.FieldRepetitionType_REQUIRED, parquet.FieldRepetitionType_OPTIONAL}, }, + { + // TODO (mb): switch one of the values in the array to NULL once the + // vendor's parquet file reader bug is resolved. + // https://github.com/fraugster/parquet-go/issues/60 + // + // I already verified that the vendor's parquet writer can write arrays + // with null values just fine, so EXPORT PARQUET is bug-free; however, this + // roundtrip test would fail. + name: "arrays", + prep: []string{"CREATE TABLE atable (i INT PRIMARY KEY, x INT[])", + "INSERT INTO atable VALUES (1, ARRAY[1,2]), (2, ARRAY[2]), (3,ARRAY[1,13,5]),(4, NULL),(5, ARRAY[])"}, + stmt: `EXPORT INTO PARQUET 'nodelocal://0/arrays' FROM SELECT * FROM atable`, + colNames: []string{"i", "x"}, + vals: [][]interface{}{ + {int64(1), []interface{}{int64(1), int64(2)}}, + {int64(2), []interface{}{int64(2)}}, + {int64(3), []interface{}{int64(1), int64(13), int64(5)}}, + {int64(4), nil}, + {int64(5), []interface{}{}}, + }, + }, } for _, test := range tests { t.Logf("Test %s", test.name) + if test.prep != nil { + for _, cmd := range test.prep { + sqlDB.Exec(t, cmd) + } + } sqlDB.Exec(t, test.stmt) paths, err := filepath.Glob(filepath.Join(dir, test.name, parquetExportFilePattern)) diff --git a/pkg/ccl/importccl/exportparquet.go b/pkg/ccl/importccl/exportparquet.go index 5b7b7595b009..c712a8be8896 100644 --- a/pkg/ccl/importccl/exportparquet.go +++ b/pkg/ccl/importccl/exportparquet.go @@ -150,8 +150,8 @@ func newParquetColumn(typ *types.T, name string, nullable bool) (parquetColumn, cannot have null values). A column is set to required if the user specified the CRDB column as NOT NULL. - optional: 0 or 1 occurrence (i.e. same as above, but can have values) - - repeated: 0 or more occurrences (the column value can be an array of values, - so the value within the array will have its own repetition type) + - repeated: 0 or more occurrences (the column value will be an array. A + value within the array will have its own repetition type) See this blog post for more on parquet type specification: https://blog.twitter.com/engineering/en_us/a/2013/dremel-made-simple-with-parquet @@ -208,10 +208,60 @@ func newParquetColumn(typ *types.T, name string, nullable bool) (parquetColumn, } case types.ArrayFamily: - - // TODO(mb): Figure out how to modify the encodeFn for arrays.
- // One possibility: recurse on type within array, define encodeFn elsewhere - // col.definition.Children[0] = newParquetColumn(typ.ArrayContents(), name) - return col, errors.Errorf("parquet export does not support array type yet") + + // Define a list such that the parquet schema in json is: + /* + required group colName (LIST){ // parent + repeated group list { // child + required colType element; //grandChild + } + } + */ + // MB figured this out by running toy examples of the fraugster-parquet + // vendor repository for added context, checkout this issue + // https://github.com/fraugster/parquet-go/issues/18 + + // First, define the grandChild definition, the schema for the array value. + grandChild, err := newParquetColumn(typ.ArrayContents(), "element", true) + if err != nil { + return col, err + } + + // Next define the child definition, required by fraugster-parquet vendor library. Again, + // there's little documentation on this. MB figured this out using a debugger. + child := &parquetschema.ColumnDefinition{} + child.SchemaElement = parquet.NewSchemaElement() + child.SchemaElement.RepetitionType = parquet.FieldRepetitionTypePtr(parquet. + FieldRepetitionType_REPEATED) + child.SchemaElement.Name = "list" + child.Children = []*parquetschema.ColumnDefinition{grandChild.definition} + ngc := int32(len(child.Children)) + child.SchemaElement.NumChildren = &ngc + + // Finally, define the parent definition. + col.definition.Children = []*parquetschema.ColumnDefinition{child} + nc := int32(len(col.definition.Children)) + child.SchemaElement.NumChildren = &nc + col.definition.SchemaElement.ConvertedType = parquet.ConvertedTypePtr(parquet.ConvertedType_LIST) + col.encodeFn = func(d tree.Datum) (interface{}, error) { + + datumArr := d.(*tree.DArray) + els := make([]map[string]interface{}, datumArr.Len()) + for i, elt := range datumArr.Array { + var el interface{} + if elt.ResolvedType().Family() == types.UnknownFamily { + // skip encoding the datum + } else { + el, err = grandChild.encodeFn(elt) + if err != nil { + return col, err + } + } + els[i] = map[string]interface{}{"element": el} + } + encEl := map[string]interface{}{"list": els} + return encEl, nil + } default: return col, errors.Errorf("parquet export does not support the %v type yet", typ.Family()) @@ -307,7 +357,7 @@ func (sp *parquetWriterProcessor) Run(ctx context.Context) { typs := sp.input.OutputTypes() sp.input.Start(ctx) input := execinfra.MakeNoMetadataRowSource(sp.input, sp.output) - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} exporter, err := newParquetExporter(sp.spec, typs) if err != nil { diff --git a/pkg/ccl/importccl/import_planning.go b/pkg/ccl/importccl/import_planning.go index 525b371bc7c3..cfd7309e4a94 100644 --- a/pkg/ccl/importccl/import_planning.go +++ b/pkg/ccl/importccl/import_planning.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -869,7 +870,7 @@ func importPlanHook( // Store the primary region of the database being imported into. This is // used during job execution to evaluate certain default expressions and // computed columns such as `gateway_region`. 
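To make the LIST shape above concrete: `encodeFn` nests each SQL array under a `"list"` key with one `"element"` map per value, and the test's `decodeEl` walks that structure back. The small illustration below restates the shapes described in the comments above using plain Go literals; it is not the exporter's actual code.

```
package importccl

// exampleArrayShapes shows the nested map shapes used for ARRAY columns.
func exampleArrayShapes() (encoded, readBackEmpty map[string]interface{}) {
	// Writer side: ARRAY[1, 2] becomes one "element"-keyed map per value,
	// all nested under a single "list" key.
	encoded = map[string]interface{}{
		"list": []map[string]interface{}{
			{"element": int64(1)},
			{"element": int64(2)},
		},
	}
	// Reader side: per the decodeEl comments, an empty ARRAY comes back from
	// the vendor reader as a list holding one map with no "element" key,
	// which decodeEl turns into an empty []interface{}.
	readBackEmpty = map[string]interface{}{
		"list": []map[string]interface{}{{}},
	}
	return encoded, readBackEmpty
}
```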
- var databasePrimaryRegion descpb.RegionName + var databasePrimaryRegion catpb.RegionName if db.IsMultiRegion() { if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { diff --git a/pkg/ccl/importccl/import_processor.go b/pkg/ccl/importccl/import_processor.go index b6d3b34f3eea..351ad83627de 100644 --- a/pkg/ccl/importccl/import_processor.go +++ b/pkg/ccl/importccl/import_processor.go @@ -15,6 +15,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" + "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -95,6 +96,13 @@ var importBufferIncrementSize = func() *settings.ByteSizeSetting { return s }() +var importAtNow = settings.RegisterBoolSetting( + settings.TenantWritable, + "bulkio.import_at_current_time.enabled", + "write imported data at the current timestamp, when each batch is flushed", + false, +) + // ImportBufferConfigSizes determines the minimum, maximum and step size for the // BulkAdder buffer used in import. func importBufferConfigSizes(st *cluster.Settings, isPKAdder bool) (int64, func() int64, int64) { @@ -319,6 +327,15 @@ func ingestKvs( defer span.Finish() writeTS := hlc.Timestamp{WallTime: spec.WalltimeNanos} + writeAtRequestTime := false + if importAtNow.Get(&flowCtx.Cfg.Settings.SV) { + if !flowCtx.Cfg.Settings.Version.IsActive(ctx, clusterversion.MVCCAddSSTable) { + return nil, errors.Newf( + "cannot use %s until version %s", importAtNow.Key(), clusterversion.MVCCAddSSTable.String(), + ) + } + writeAtRequestTime = true + } flushSize := func() int64 { return storageccl.MaxIngestBatchSize(flowCtx.Cfg.Settings) } @@ -341,6 +358,7 @@ func ingestKvs( MaxBufferSize: maxBufferSize, StepBufferSize: stepSize, SSTSize: flushSize, + WriteAtRequestTime: writeAtRequestTime, }) if err != nil { return nil, err @@ -357,6 +375,7 @@ func ingestKvs( MaxBufferSize: maxBufferSize, StepBufferSize: stepSize, SSTSize: flushSize, + WriteAtRequestTime: writeAtRequestTime, }) if err != nil { return nil, err diff --git a/pkg/ccl/importccl/import_stmt_test.go b/pkg/ccl/importccl/import_stmt_test.go index fb432171cde8..6e95bfeca680 100644 --- a/pkg/ccl/importccl/import_stmt_test.go +++ b/pkg/ccl/importccl/import_stmt_test.go @@ -5601,6 +5601,26 @@ func TestImportPgDump(t *testing.T) { "PGDUMP file format is currently unsupported by IMPORT INTO", fmt.Sprintf(`IMPORT INTO t (a, b) PGDUMP DATA (%q)`, srv.URL)) }) + t.Run("more-target-cols-than-data", func(t *testing.T) { + data := ` +CREATE TABLE public.t (c STRING, a STRING, b STRING, d STRING); +COPY public.t (a, b, c) FROM stdin; +a b c +\. 
+INSERT INTO public.t (a, b, c) VALUES ('a', 'b', 'c'); + ` + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + _, _ = w.Write([]byte(data)) + } + })) + defer srv.Close() + defer sqlDB.Exec(t, "DROP TABLE t") + sqlDB.Exec(t, ` +IMPORT TABLE t FROM PGDUMP ($1) WITH ignore_unsupported_statements`, srv.URL) + sqlDB.CheckQueryResults(t, `SELECT * from t`, + [][]string{{"c", "a", "b", "NULL"}, {"c", "a", "b", "NULL"}}) + }) } func TestImportPgDumpIgnoredStmts(t *testing.T) { diff --git a/pkg/ccl/importccl/import_table_creation.go b/pkg/ccl/importccl/import_table_creation.go index 3ddf7a5dc64f..ab3b018b601f 100644 --- a/pkg/ccl/importccl/import_table_creation.go +++ b/pkg/ccl/importccl/import_table_creation.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" @@ -224,17 +225,17 @@ var ( // Implements the tree.RegionOperator interface. type importRegionOperator struct { - primaryRegion descpb.RegionName + primaryRegion catpb.RegionName } -func makeImportRegionOperator(primaryRegion descpb.RegionName) *importRegionOperator { +func makeImportRegionOperator(primaryRegion catpb.RegionName) *importRegionOperator { return &importRegionOperator{primaryRegion: primaryRegion} } // importDatabaseRegionConfig is a stripped down version of // multiregion.RegionConfig that is used by import. type importDatabaseRegionConfig struct { - primaryRegion descpb.RegionName + primaryRegion catpb.RegionName } // IsValidRegionNameString implements the tree.DatabaseRegionConfig interface. @@ -333,7 +334,7 @@ func (so *importSequenceOperators) HasPrivilege( ctx context.Context, specifier tree.HasPrivilegeSpecifier, user security.SQLUsername, - kind privilege.Kind, + priv privilege.Privilege, ) (bool, error) { return false, errors.WithStack(errSequenceOperators) } diff --git a/pkg/ccl/importccl/read_import_base.go b/pkg/ccl/importccl/read_import_base.go index 30fbed379b1a..6bd59369b25b 100644 --- a/pkg/ccl/importccl/read_import_base.go +++ b/pkg/ccl/importccl/read_import_base.go @@ -364,15 +364,13 @@ func isMultiTableFormat(format roachpb.IOFileFormat_FileFormat) bool { return false } -func makeRowErr(_ string, row int64, code pgcode.Code, format string, args ...interface{}) error { +func makeRowErr(row int64, code pgcode.Code, format string, args ...interface{}) error { err := pgerror.NewWithDepthf(1, code, format, args...) err = errors.WrapWithDepthf(1, err, "row %d", row) return err } -func wrapRowErr( - err error, _ string, row int64, code pgcode.Code, format string, args ...interface{}, -) error { +func wrapRowErr(err error, row int64, code pgcode.Code, format string, args ...interface{}) error { if format != "" || len(args) > 0 { err = errors.WrapWithDepthf(1, err, format, args...) } @@ -396,13 +394,18 @@ const ( ) func (e *importRowError) Error() string { - return fmt.Sprintf("error parsing row %d: %v (row: %q)", e.rowNum, e.err, e.row) + // The job system will truncate this error before saving it, + // but we will additionally truncate it here since it is + // separately written to the log and could easily result in + // very large log files. 
+ rowForLog := e.row + if len(rowForLog) > importRowErrMaxRuneCount { + rowForLog = util.TruncateString(rowForLog, importRowErrMaxRuneCount) + importRowErrTruncatedMarker + } + return fmt.Sprintf("error parsing row %d: %v (row: %s)", e.rowNum, e.err, rowForLog) } func newImportRowError(err error, row string, num int64) error { - if len(row) > importRowErrMaxRuneCount { - row = util.TruncateString(row, importRowErrMaxRuneCount) + importRowErrTruncatedMarker - } return &importRowError{ err: err, row: row, diff --git a/pkg/ccl/importccl/read_import_pgdump.go b/pkg/ccl/importccl/read_import_pgdump.go index 9be3bb5f25ff..6d0ab6b99a91 100644 --- a/pkg/ccl/importccl/read_import_pgdump.go +++ b/pkg/ccl/importccl/read_import_pgdump.go @@ -1184,12 +1184,20 @@ func (m *pgDumpReader) readFile( conv.TargetColOrds.Add(idx) targetColMapIdx[j] = idx } + // For any missing columns, fill those to NULL. + // These will get filled in with the correct default / computed + // expression if there are any for these columns. + for idx := range conv.VisibleCols { + if !conv.TargetColOrds.Contains(idx) { + conv.Datums[idx] = tree.DNull + } + } } for { row, err := ps.Next() // We expect an explicit copyDone here. io.EOF is unexpected. if err == io.EOF { - return makeRowErr("", count, pgcode.ProtocolViolation, + return makeRowErr(count, pgcode.ProtocolViolation, "unexpected EOF") } if row == errCopyDone { @@ -1198,7 +1206,7 @@ func (m *pgDumpReader) readFile( count++ tableNameToRowsProcessed[name.String()]++ if err != nil { - return wrapRowErr(err, "", count, pgcode.Uncategorized, "") + return wrapRowErr(err, count, pgcode.Uncategorized, "") } if !importing { continue @@ -1209,7 +1217,7 @@ func (m *pgDumpReader) readFile( switch row := row.(type) { case copyData: if expected, got := conv.TargetColOrds.Len(), len(row); expected != got { - return makeRowErr("", count, pgcode.Syntax, + return makeRowErr(count, pgcode.Syntax, "expected %d values, got %d", expected, got) } if rowLimit != 0 && tableNameToRowsProcessed[name.String()] > rowLimit { @@ -1226,7 +1234,7 @@ func (m *pgDumpReader) readFile( conv.Datums[idx], _, err = tree.ParseAndRequireString(conv.VisibleColTypes[idx], *s, conv.EvalCtx) if err != nil { col := conv.VisibleCols[idx] - return wrapRowErr(err, "", count, pgcode.Syntax, + return wrapRowErr(err, count, pgcode.Syntax, "parse %q as %s", col.GetName(), col.GetType().SQLString()) } } @@ -1235,7 +1243,7 @@ func (m *pgDumpReader) readFile( return err } default: - return makeRowErr("", count, pgcode.Uncategorized, + return makeRowErr(count, pgcode.Uncategorized, "unexpected: %v", row) } } @@ -1362,7 +1370,7 @@ func (m *pgDumpReader) readFile( } key, val, err := sql.MakeSequenceKeyVal(m.evalCtx.Codec, seq, val, isCalled) if err != nil { - return wrapRowErr(err, "", count, pgcode.Uncategorized, "") + return wrapRowErr(err, count, pgcode.Uncategorized, "") } kv := roachpb.KeyValue{Key: key} kv.Value.SetInt(val) diff --git a/pkg/ccl/importccl/read_import_workload.go b/pkg/ccl/importccl/read_import_workload.go index bb86bd82793e..35089ab507bf 100644 --- a/pkg/ccl/importccl/read_import_workload.go +++ b/pkg/ccl/importccl/read_import_workload.go @@ -17,7 +17,7 @@ import ( "sync/atomic" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -58,7 +58,7 @@ func (w *workloadReader) start(ctx ctxgroup.Group) { // makeDatumFromColOffset tries to fast-path a 
few workload-generated types into // directly datums, to dodge making a string and then the parsing it. func makeDatumFromColOffset( - alloc *rowenc.DatumAlloc, hint *types.T, evalCtx *tree.EvalContext, col coldata.Vec, rowIdx int, + alloc *tree.DatumAlloc, hint *types.T, evalCtx *tree.EvalContext, col coldata.Vec, rowIdx int, ) (tree.Datum, error) { if col.Nulls().NullAt(rowIdx) { return tree.DNull, nil @@ -232,7 +232,7 @@ func (w *WorkloadKVConverter) Worker( conv.FractionFn = func() float32 { return float32(atomic.LoadInt64(&w.finishedBatchesAtomic)) / w.totalBatches } - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc var a bufalloc.ByteAllocator cb := coldata.NewMemBatchWithCapacity(nil /* typs */, 0 /* capacity */, coldata.StandardColumnFactory) diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row b/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row index 80d241065b09..e0a5752f5582 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/testdata/boundedstaleness/single_row @@ -15,7 +15,7 @@ INSERT INTO t VALUES (1); # read, we should always be looking at the leaseholder in the nearest_only=False # case. We always do bounded staleness reads from node_idx 2, as node_idx 0 in a # TestCluster is always the leaseholder. -query idx=2 +query idx=2 wait-until-match SELECT * FROM t AS OF SYSTEM TIME with_max_staleness('1μs') WHERE pk = 1 ---- 1 diff --git a/pkg/ccl/kvccl/kvtenantccl/connector_test.go b/pkg/ccl/kvccl/kvtenantccl/connector_test.go index a0cc7af7afcd..12a2c7f617b6 100644 --- a/pkg/ccl/kvccl/kvtenantccl/connector_test.go +++ b/pkg/ccl/kvccl/kvtenantccl/connector_test.go @@ -100,6 +100,12 @@ func (m *mockServer) UpdateSpanConfigs( panic("unimplemented") } +func (m *mockServer) TenantSettings( + *roachpb.TenantSettingsRequest, roachpb.Internal_TenantSettingsServer, +) error { + panic("unimplemented") +} + func gossipEventForClusterID(clusterID uuid.UUID) *roachpb.GossipSubscriptionEvent { return &roachpb.GossipSubscriptionEvent{ Key: gossip.KeyClusterID, diff --git a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality index 0620b94e8597..70e0ba932ca7 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality +++ b/pkg/ccl/logictestccl/testdata/logic_test/alter_table_locality @@ -10,7 +10,7 @@ CREATE TABLE no_table_locality ( FAMILY (pk, i) ) -statement error cannot alter a table's LOCALITY if its database is not multi-region enabled +statement error cannot alter a table's LOCALITY if its database is not multi-region enabled\nHINT: database must first be multi-region enabled using ALTER DATABASE ... 
SET PRIMARY REGION ALTER TABLE no_table_locality SET LOCALITY REGIONAL BY TABLE statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/as_of b/pkg/ccl/logictestccl/testdata/logic_test/as_of index 4e4aeff9ba35..9ef2ac43fcfc 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/as_of +++ b/pkg/ccl/logictestccl/testdata/logic_test/as_of @@ -174,21 +174,36 @@ SELECT * FROM t AS OF SYSTEM TIME with_max_staleness('1ms') WHERE j = 2 query T EXPLAIN (OPT, MEMO) SELECT * FROM t AS OF SYSTEM TIME with_max_staleness('1ms') WHERE j = 2 AND i = 1 ---- -memo (optimized, ~8KB, required=[presentation: info:6]) - ├── G1: (explain G2 [presentation: i:1,j:2,k:3]) - │ └── [presentation: info:6] - │ ├── best: (explain G2="[presentation: i:1,j:2,k:3]" [presentation: i:1,j:2,k:3]) +memo (optimized, ~8KB, required=[presentation: info:6] [distribution: test]) + ├── G1: (explain G2 [presentation: i:1,j:2,k:3] [distribution: test]) + │ ├── [presentation: info:6] [distribution: test] + │ │ ├── best: (explain G2="[presentation: i:1,j:2,k:3] [distribution: test]" [presentation: i:1,j:2,k:3] [distribution: test]) + │ │ └── cost: 5.18 + │ └── [] + │ ├── best: (explain G2="[presentation: i:1,j:2,k:3]" [presentation: i:1,j:2,k:3] [distribution: test]) │ └── cost: 5.18 ├── G2: (select G3 G4) (select G5 G6) - │ └── [presentation: i:1,j:2,k:3] + │ ├── [presentation: i:1,j:2,k:3] + │ │ ├── best: (select G5 G6) + │ │ └── cost: 5.16 + │ ├── [presentation: i:1,j:2,k:3] [distribution: test] + │ │ ├── best: (select G5="[distribution: test]" G6) + │ │ └── cost: 5.16 + │ └── [] │ ├── best: (select G5 G6) │ └── cost: 5.16 ├── G3: (scan t,cols=(1-3)) (scan t@t_k_key,cols=(1-3)) + │ ├── [distribution: test] + │ │ ├── best: (scan t,cols=(1-3)) + │ │ └── cost: 1145.22 │ └── [] │ ├── best: (scan t,cols=(1-3)) │ └── cost: 1145.22 ├── G4: (filters G7 G8) ├── G5: (scan t,cols=(1-3),constrained) + │ ├── [distribution: test] + │ │ ├── best: (scan t,cols=(1-3),constrained) + │ │ └── cost: 5.13 │ └── [] │ ├── best: (scan t,cols=(1-3),constrained) │ └── cost: 5.13 diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region b/pkg/ccl/logictestccl/testdata/logic_test/multi_region index 88c93fb33d2e..5d8a86378b39 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region @@ -16,10 +16,10 @@ ap-southeast-2 {ap-az1,ap-az2,ap-az3} ca-central-1 {ca-az1,ca-az2,ca-az3} us-east-1 {us-az1,us-az2,us-az3} -statement error cannot set LOCALITY on a table in a database that is not multi-region enabled +statement error cannot set LOCALITY on a table in a database that is not multi-region enabled\nHINT: database must first be multi-region enabled using ALTER DATABASE ... SET PRIMARY REGION CREATE TABLE regional_by_table_table (pk int) LOCALITY REGIONAL BY TABLE -statement error cannot set LOCALITY on a table in a database that is not multi-region enabled +statement error cannot set LOCALITY on a table in a database that is not multi-region enabled\nHINT: database must first be multi-region enabled using ALTER DATABASE ... 
SET PRIMARY REGION CREATE TABLE global_table (pk int) LOCALITY GLOBAL statement ok diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs index 5eb0f90dca3e..f91c132f0a58 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_zone_configs @@ -1,7 +1,7 @@ # LogicTest: multiregion-9node-3region-3azs multiregion-9node-3region-3azs-tenant statement ok -SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true query TTTT SHOW REGIONS diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row index 66b800342d18..cf08d3a7d59c 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row @@ -5,7 +5,7 @@ statement ok statement ok CREATE DATABASE multi_region_test_db PRIMARY REGION "ca-central-1" REGIONS "ap-southeast-2", "us-east-1" SURVIVE REGION FAILURE -statement error cannot set LOCALITY on a table in a database that is not multi-region enabled +statement error cannot set LOCALITY on a table in a database that is not multi-region enabled\nHINT: database must first be multi-region enabled using ALTER DATABASE ... SET PRIMARY REGION CREATE TABLE regional_by_row_table (pk int) LOCALITY REGIONAL BY ROW statement ok @@ -293,7 +293,7 @@ public regional_by_row_table_explicit_crdb_region_column table root 0 # Add a gc.ttlseconds to a partition and ensure it displays. statement ok -SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true; +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true; ALTER PARTITION "us-east-1" OF INDEX public.regional_by_row_table@regional_by_row_table_a_idx CONFIGURE ZONE USING gc.ttlseconds = 10 diff --git a/pkg/ccl/logictestccl/testdata/logic_test/zone_config_secondary_tenants b/pkg/ccl/logictestccl/testdata/logic_test/zone_config_secondary_tenants index 462a514a7ab5..6e27af2b2b23 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/zone_config_secondary_tenants +++ b/pkg/ccl/logictestccl/testdata/logic_test/zone_config_secondary_tenants @@ -4,11 +4,14 @@ statement ok CREATE TABLE t(); +statement ok +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = false + statement error pq: unimplemented: operation is unsupported in multi-tenancy mode ALTER TABLE t CONFIGURE ZONE USING num_replicas = 5; statement ok -SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true statement ok ALTER TABLE t CONFIGURE ZONE USING num_replicas = 5; diff --git a/pkg/ccl/migrationccl/migrationsccl/seed_tenant_span_configs_external_test.go b/pkg/ccl/migrationccl/migrationsccl/seed_tenant_span_configs_external_test.go index 0b6933295499..1e38a6e3ceb7 100644 --- a/pkg/ccl/migrationccl/migrationsccl/seed_tenant_span_configs_external_test.go +++ b/pkg/ccl/migrationccl/migrationsccl/seed_tenant_span_configs_external_test.go @@ -52,7 +52,17 @@ func TestPreSeedSpanConfigsWrittenWhenActive(t *testing.T) { ts := tc.Server(0) tenantID := roachpb.MakeTenantID(10) - _, err := ts.StartTenant(ctx, base.TestTenantArgs{TenantID: tenantID}) + _, err := ts.StartTenant(ctx, base.TestTenantArgs{ + 
TenantID: tenantID, + TestingKnobs: base.TestingKnobs{ + SpanConfig: &spanconfig.TestingKnobs{ + // Disable the tenant's span config reconciliation process, + // it'll muck with the tenant's span configs that we check + // below. + ManagerDisableJobCreation: true, + }, + }, + }) require.NoError(t, err) scKVAccessor := ts.SpanConfigKVAccessor().(spanconfig.KVAccessor) @@ -101,7 +111,17 @@ func TestSeedTenantSpanConfigs(t *testing.T) { tenantSpan := roachpb.Span{Key: tenantPrefix, EndKey: tenantPrefix.PrefixEnd()} tenantSeedSpan := roachpb.Span{Key: tenantPrefix, EndKey: tenantPrefix.Next()} { - _, err := ts.StartTenant(ctx, base.TestTenantArgs{TenantID: tenantID}) + _, err := ts.StartTenant(ctx, base.TestTenantArgs{ + TenantID: tenantID, + TestingKnobs: base.TestingKnobs{ + SpanConfig: &spanconfig.TestingKnobs{ + // Disable the tenant's span config reconciliation process, + // it'll muck with the tenant's span configs that we check + // below. + ManagerDisableJobCreation: true, + }, + }, + }) require.NoError(t, err) } @@ -159,7 +179,17 @@ func TestSeedTenantSpanConfigsWithExistingEntry(t *testing.T) { tenantSpan := roachpb.Span{Key: tenantPrefix, EndKey: tenantPrefix.PrefixEnd()} tenantSeedSpan := roachpb.Span{Key: tenantPrefix, EndKey: tenantPrefix.Next()} { - _, err := ts.StartTenant(ctx, base.TestTenantArgs{TenantID: tenantID}) + _, err := ts.StartTenant(ctx, base.TestTenantArgs{ + TenantID: tenantID, + TestingKnobs: base.TestingKnobs{ + SpanConfig: &spanconfig.TestingKnobs{ + // Disable the tenant's span config reconciliation process, + // it'll muck with the tenant's span configs that we check + // below. + ManagerDisableJobCreation: true, + }, + }, + }) require.NoError(t, err) } diff --git a/pkg/ccl/multiregionccl/BUILD.bazel b/pkg/ccl/multiregionccl/BUILD.bazel index 9e0ddc4fde88..34f317ba8739 100644 --- a/pkg/ccl/multiregionccl/BUILD.bazel +++ b/pkg/ccl/multiregionccl/BUILD.bazel @@ -9,7 +9,7 @@ go_library( "//pkg/ccl/utilccl", "//pkg/sql", "//pkg/sql/catalog/catalogkv", - "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/multiregion", "//pkg/sql/catalog/typedesc", "//pkg/sql/pgwire/pgcode", diff --git a/pkg/ccl/multiregionccl/datadriven_test.go b/pkg/ccl/multiregionccl/datadriven_test.go index 5d20dddfa94d..a5a18df43f97 100644 --- a/pkg/ccl/multiregionccl/datadriven_test.go +++ b/pkg/ccl/multiregionccl/datadriven_test.go @@ -707,7 +707,7 @@ func getRangeKeyForInput( } _, keyPrefix, err := rowenc.DecodePartitionTuple( - &rowenc.DatumAlloc{}, + &tree.DatumAlloc{}, keys.SystemSQLCodec, tableDesc, primaryInd, diff --git a/pkg/ccl/multiregionccl/multiregion.go b/pkg/ccl/multiregionccl/multiregion.go index b448d907972f..60784930abee 100644 --- a/pkg/ccl/multiregionccl/multiregion.go +++ b/pkg/ccl/multiregionccl/multiregion.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -33,7 +33,7 @@ func initializeMultiRegionMetadata( execCfg *sql.ExecutorConfig, liveRegions sql.LiveClusterRegions, goal tree.SurvivalGoal, - primaryRegion descpb.RegionName, + primaryRegion catpb.RegionName, regions []tree.Name, dataPlacement tree.DataPlacement, ) 
(*multiregion.RegionConfig, error) { @@ -50,22 +50,22 @@ func initializeMultiRegionMetadata( return nil, err } - if primaryRegion != descpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { + if primaryRegion != catpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { if err := sql.CheckClusterRegionIsLive(liveRegions, primaryRegion); err != nil { return nil, err } } - regionNames := make(descpb.RegionNames, 0, len(regions)+1) - seenRegions := make(map[descpb.RegionName]struct{}, len(regions)+1) + regionNames := make(catpb.RegionNames, 0, len(regions)+1) + seenRegions := make(map[catpb.RegionName]struct{}, len(regions)+1) if len(regions) > 0 { - if primaryRegion == descpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { + if primaryRegion == catpb.RegionName(tree.PrimaryRegionNotSpecifiedName) { return nil, pgerror.Newf( pgcode.InvalidDatabaseDefinition, "PRIMARY REGION must be specified if REGIONS are specified", ) } for _, r := range regions { - region := descpb.RegionName(r) + region := catpb.RegionName(r) if err := sql.CheckClusterRegionIsLive(liveRegions, region); err != nil { return nil, err } diff --git a/pkg/ccl/multiregionccl/roundtrips_test.go b/pkg/ccl/multiregionccl/roundtrips_test.go index 19d0b0522190..f04e22eac0eb 100644 --- a/pkg/ccl/multiregionccl/roundtrips_test.go +++ b/pkg/ccl/multiregionccl/roundtrips_test.go @@ -20,8 +20,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/tracing" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -29,6 +32,7 @@ import ( // GLOBAL tables don't incur a network hop. func TestEnsureLocalReadsOnGlobalTables(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) // ensureOnlyLocalReads looks at a trace to ensure that reads were served // locally. It returns true if the read was served as a follower read. @@ -107,18 +111,26 @@ func TestEnsureLocalReadsOnGlobalTables(t *testing.T) { tc.AddVotersOrFatal(t, tablePrefix.AsRawKey(), tc.Target(1), tc.Target(2)) for i := 0; i < numServers; i++ { - // Run a query to populate its cache. conn := tc.ServerConn(i) - _, err = conn.Exec("SELECT * from t.test_table WHERE k=1") - require.NoError(t, err) + isLeaseHolder := false + testutils.SucceedsSoon(t, func() error { + // Run a query to populate its cache. + _, err = conn.Exec("SELECT * from t.test_table WHERE k=1") + require.NoError(t, err) + + // Check that the cache was indeed populated. + cache := tc.Server(i).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache() + entry := cache.GetCached(context.Background(), tablePrefix, false /* inverted */) + require.NotNil(t, entry.Lease().Empty()) + require.NotNil(t, entry) + + if expected, got := roachpb.LEAD_FOR_GLOBAL_READS, entry.ClosedTimestampPolicy(); got != expected { + return errors.Newf("expected closedts policy %s, got %s", expected, got) + } - // Check that the cache was indeed populated. 
- cache := tc.Server(i).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache() - entry := cache.GetCached(context.Background(), tablePrefix, false /* inverted */) - require.NotNil(t, entry.Lease().Empty()) - require.NotNil(t, entry) - require.Equal(t, roachpb.LEAD_FOR_GLOBAL_READS, entry.ClosedTimestampPolicy()) - isLeaseHolder := entry.Lease().Replica.NodeID == tc.Server(i).NodeID() + isLeaseHolder = entry.Lease().Replica.NodeID == tc.Server(i).NodeID() + return nil + }) // Run the query to ensure local read. _, err = conn.Exec(presentTimeRead) diff --git a/pkg/ccl/partitionccl/BUILD.bazel b/pkg/ccl/partitionccl/BUILD.bazel index b6a73c00d24d..609a2e9211ad 100644 --- a/pkg/ccl/partitionccl/BUILD.bazel +++ b/pkg/ccl/partitionccl/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/schemachanger/scdeps", "//pkg/sql/sem/tree", "//pkg/sql/types", @@ -63,7 +64,6 @@ go_test( "//pkg/sql/gcjob", "//pkg/sql/parser", "//pkg/sql/randgen", - "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/sql/tests", "//pkg/sql/types", diff --git a/pkg/ccl/partitionccl/partition.go b/pkg/ccl/partitionccl/partition.go index 1bd162c7894d..197c9d3fab5f 100644 --- a/pkg/ccl/partitionccl/partition.go +++ b/pkg/ccl/partitionccl/partition.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -114,9 +115,7 @@ func valueEncodePartitionTuple( if err := colinfo.CheckDatumTypeFitsColumnType(cols[i], datum.ResolvedType()); err != nil { return nil, err } - value, err = rowenc.EncodeTableValue( - value, descpb.ColumnID(encoding.NoColumnID), datum, scratch, - ) + value, err = valueside.Encode(value, valueside.NoColumnID, datum, scratch) if err != nil { return nil, err } @@ -411,7 +410,7 @@ func selectPartitionExprs( exprsByPartName[string(partName)] = nil } - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} var prefixDatums []tree.Datum if err := catalog.ForEachIndex(tableDesc, catalog.IndexOpts{ AddMutations: true, @@ -467,7 +466,7 @@ func selectPartitionExprs( // register itself in the map with a placeholder entry (so we can still verify // that the requested partitions are all valid). 
func selectPartitionExprsByName( - a *rowenc.DatumAlloc, + a *tree.DatumAlloc, evalCtx *tree.EvalContext, tableDesc catalog.TableDescriptor, idx catalog.Index, diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index e608340a19d7..9d27540b3878 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -35,7 +35,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -1371,7 +1370,7 @@ func TestRepartitioning(t *testing.T) { repartition.WriteString(`PARTITION BY NOTHING`) } else { if err := sql.ShowCreatePartitioning( - &rowenc.DatumAlloc{}, keys.SystemSQLCodec, test.new.parsed.tableDesc, testIndex, + &tree.DatumAlloc{}, keys.SystemSQLCodec, test.new.parsed.tableDesc, testIndex, testIndex.GetPartitioning(), &repartition, 0 /* indent */, 0, /* colOffset */ ); err != nil { t.Fatalf("%+v", err) diff --git a/pkg/ccl/serverccl/BUILD.bazel b/pkg/ccl/serverccl/BUILD.bazel index f8e15ae1f7c4..4e1c3c41bff3 100644 --- a/pkg/ccl/serverccl/BUILD.bazel +++ b/pkg/ccl/serverccl/BUILD.bazel @@ -31,7 +31,6 @@ go_test( "//pkg/server/serverpb", "//pkg/sql", "//pkg/sql/distsql", - "//pkg/sql/pgwire/pgcode", "//pkg/sql/tests", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/ccl/serverccl/role_authentication_test.go b/pkg/ccl/serverccl/role_authentication_test.go index 308ebedc76e9..8a155875fd1f 100644 --- a/pkg/ccl/serverccl/role_authentication_test.go +++ b/pkg/ccl/serverccl/role_authentication_test.go @@ -42,11 +42,12 @@ func TestVerifyPassword(t *testing.T) { s.ExecutorConfig().(sql.ExecutorConfig).Settings, ) + ts := s.(*server.TestServer) + if util.RaceEnabled { // The default bcrypt cost makes this test approximately 30s slower when the // race detector is on. - defer func(prev int) { security.BcryptCost = prev }(security.BcryptCost) - security.BcryptCost = bcrypt.MinCost + security.BcryptCost.Override(ctx, &ts.Cfg.Settings.SV, int64(bcrypt.MinCost)) } //location is used for timezone testing. diff --git a/pkg/ccl/serverccl/server_sql_test.go b/pkg/ccl/serverccl/server_sql_test.go index 3c83807b5190..331fd3ddb3ff 100644 --- a/pkg/ccl/serverccl/server_sql_test.go +++ b/pkg/ccl/serverccl/server_sql_test.go @@ -14,6 +14,7 @@ import ( "fmt" "io/ioutil" "net/http" + "strings" "testing" "github.com/cockroachdb/cockroach/pkg/base" @@ -23,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/distsql" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -86,7 +86,9 @@ func TestTenantCannotSetClusterSetting(t *testing.T) { var pqErr *pq.Error ok := errors.As(err, &pqErr) require.True(t, ok, "expected err to be a *pq.Error but is of type %T. 
error is: %v", err) - require.Equal(t, pq.ErrorCode(pgcode.InsufficientPrivilege.String()), pqErr.Code, "err %v has unexpected code", err) + if !strings.Contains(pqErr.Message, "unknown cluster setting") { + t.Errorf("unexpected error: %v", err) + } } func TestTenantCanUseEnterpriseFeatures(t *testing.T) { diff --git a/pkg/ccl/serverccl/statusccl/BUILD.bazel b/pkg/ccl/serverccl/statusccl/BUILD.bazel index 6dc238131364..ffc142050c38 100644 --- a/pkg/ccl/serverccl/statusccl/BUILD.bazel +++ b/pkg/ccl/serverccl/statusccl/BUILD.bazel @@ -42,6 +42,7 @@ go_test( "//pkg/server/serverpb", "//pkg/sql/catalog/catconstants", "//pkg/sql/catalog/descpb", + "//pkg/sql/sem/tree", "//pkg/sql/sqlstats", "//pkg/sql/tests", "//pkg/testutils", diff --git a/pkg/ccl/serverccl/statusccl/tenant_status_test.go b/pkg/ccl/serverccl/statusccl/tenant_status_test.go index c7dae43e9e8a..1d07a26c0b71 100644 --- a/pkg/ccl/serverccl/statusccl/tenant_status_test.go +++ b/pkg/ccl/serverccl/statusccl/tenant_status_test.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -111,19 +112,35 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) { tenantStatusServer := tenant.StatusServer().(serverpb.SQLStatusServer) type testCase struct { - stmt string - fingerprint string + stmt string + formattedStmt string + fingerprint string + formattedFingerprint string } testCaseTenant := []testCase{ - {stmt: `CREATE DATABASE roachblog_t`}, - {stmt: `SET database = roachblog_t`}, - {stmt: `CREATE TABLE posts_t (id INT8 PRIMARY KEY, body STRING)`}, { - stmt: `INSERT INTO posts_t VALUES (1, 'foo')`, - fingerprint: `INSERT INTO posts_t VALUES (_, '_')`, + stmt: `CREATE DATABASE roachblog_t`, + formattedStmt: "CREATE DATABASE roachblog_t\n", + }, + { + stmt: `SET database = roachblog_t`, + formattedStmt: "SET database = roachblog_t\n", + }, + { + stmt: `CREATE TABLE posts_t (id INT8 PRIMARY KEY, body STRING)`, + formattedStmt: "CREATE TABLE posts_t (id INT8 PRIMARY KEY, body STRING)\n", + }, + { + stmt: `INSERT INTO posts_t VALUES (1, 'foo')`, + fingerprint: `INSERT INTO posts_t VALUES (_, '_')`, + formattedStmt: "INSERT INTO posts_t VALUES (1, 'foo')\n", + formattedFingerprint: "INSERT INTO posts_t VALUES (_, '_')\n", + }, + { + stmt: `SELECT * FROM posts_t`, + formattedStmt: "SELECT * FROM posts_t\n", }, - {stmt: `SELECT * FROM posts_t`}, } for _, stmt := range testCaseTenant { @@ -135,14 +152,28 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) { require.NoError(t, err) testCaseNonTenant := []testCase{ - {stmt: `CREATE DATABASE roachblog_nt`}, - {stmt: `SET database = roachblog_nt`}, - {stmt: `CREATE TABLE posts_nt (id INT8 PRIMARY KEY, body STRING)`}, { - stmt: `INSERT INTO posts_nt VALUES (1, 'foo')`, - fingerprint: `INSERT INTO posts_nt VALUES (_, '_')`, + stmt: `CREATE DATABASE roachblog_nt`, + formattedStmt: "CREATE DATABASE roachblog_nt\n", + }, + { + stmt: `SET database = roachblog_nt`, + formattedStmt: "SET database = roachblog_nt\n", + }, + { + stmt: `CREATE TABLE posts_nt (id INT8 PRIMARY KEY, body STRING)`, + formattedStmt: "CREATE TABLE posts_nt (id INT8 PRIMARY KEY, body STRING)\n", + }, + { + stmt: `INSERT INTO posts_nt VALUES (1, 'foo')`, + fingerprint: `INSERT INTO 
posts_nt VALUES (_, '_')`, + formattedStmt: "INSERT INTO posts_nt VALUES (1, 'foo')\n", + formattedFingerprint: "INSERT INTO posts_nt VALUES (_, '_')\n", + }, + { + stmt: `SELECT * FROM posts_nt`, + formattedStmt: "SELECT * FROM posts_nt\n", }, - {stmt: `SELECT * FROM posts_nt`}, } pgURL, cleanupGoDB := sqlutils.PGUrl( @@ -195,14 +226,22 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) { err = serverutils.GetJSONProto(nonTenant, path, &nonTenantCombinedStats) require.NoError(t, err) - checkStatements := func(t *testing.T, tc []testCase, actual *serverpb.StatementsResponse) { + checkStatements := func(t *testing.T, tc []testCase, actual *serverpb.StatementsResponse, combined bool) { t.Helper() var expectedStatements []string for _, stmt := range tc { var expectedStmt = stmt.stmt + if combined { + expectedStmt = stmt.formattedStmt + } if stmt.fingerprint != "" { - expectedStmt = stmt.fingerprint + if combined { + expectedStmt = stmt.formattedFingerprint + } else { + expectedStmt = stmt.fingerprint + } } + expectedStatements = append(expectedStatements, expectedStmt) } @@ -229,14 +268,14 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) { // First we verify that we have expected stats from tenants. t.Run("tenant-stats", func(t *testing.T) { - checkStatements(t, testCaseTenant, tenantStats) - checkStatements(t, testCaseTenant, tenantCombinedStats) + checkStatements(t, testCaseTenant, tenantStats, false) + checkStatements(t, testCaseTenant, tenantCombinedStats, true) }) // Now we verify the non tenant stats are what we expected. t.Run("non-tenant-stats", func(t *testing.T) { - checkStatements(t, testCaseNonTenant, &nonTenantStats) - checkStatements(t, testCaseNonTenant, &nonTenantCombinedStats) + checkStatements(t, testCaseNonTenant, &nonTenantStats, false) + checkStatements(t, testCaseNonTenant, &nonTenantCombinedStats, true) }) // Now we verify that tenant and non-tenant have no visibility into each other's stats. 
@@ -270,10 +309,29 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) { func testResetSQLStatsRPCForTenant( ctx context.Context, t *testing.T, testHelper *tenantTestHelper, ) { - stmts := []string{ - "SELECT 1", - "SELECT 1, 1", - "SELECT 1, 1, 1", + + type testCase struct { + stmt string + formattedStmt string + } + stmts := []testCase{ + { + stmt: "SELECT 1", + formattedStmt: "SELECT 1\n", + }, + { + stmt: "SELECT 1, 1", + formattedStmt: "SELECT 1, 1\n", + }, + { + stmt: "SELECT 1, 1, 1", + formattedStmt: "SELECT 1, 1, 1\n", + }, + } + + var expectedStatements []string + for _, tc := range stmts { + expectedStatements = append(expectedStatements, tc.formattedStmt) } testCluster := testHelper.testCluster() @@ -303,8 +361,8 @@ func testResetSQLStatsRPCForTenant( }() for _, stmt := range stmts { - testCluster.tenantConn(randomServer).Exec(t, stmt) - controlCluster.tenantConn(randomServer).Exec(t, stmt) + testCluster.tenantConn(randomServer).Exec(t, stmt.stmt) + controlCluster.tenantConn(randomServer).Exec(t, stmt.stmt) } if flushed { @@ -321,7 +379,7 @@ func testResetSQLStatsRPCForTenant( require.NotEqual(t, 0, len(statsPreReset.Statements), "expected to find stats for at least one statement, but found: %d", len(statsPreReset.Statements)) - ensureExpectedStmtFingerprintExistsInRPCResponse(t, stmts, statsPreReset, "test") + ensureExpectedStmtFingerprintExistsInRPCResponse(t, expectedStatements, statsPreReset, "test") _, err = status.ResetSQLStats(ctx, &serverpb.ResetSQLStatsRequest{ ResetPersistedStats: true, @@ -358,7 +416,7 @@ func testResetSQLStatsRPCForTenant( }) require.NoError(t, err) - ensureExpectedStmtFingerprintExistsInRPCResponse(t, stmts, statsFromControlCluster, "control") + ensureExpectedStmtFingerprintExistsInRPCResponse(t, expectedStatements, statsFromControlCluster, "control") }) } } @@ -545,10 +603,10 @@ SET DATABASE=test_db1; SELECT * FROM test; `) - getCreateStmtQuery := ` -SELECT indexdef -FROM pg_catalog.pg_indexes -WHERE tablename = 'test' AND indexname = $1` + getCreateStmtQuery := fmt.Sprintf(` + SELECT prettify_statement(indexdef, %d, %d, %d) + FROM pg_catalog.pg_indexes + WHERE tablename = 'test' AND indexname = $1`, tree.ConsoleLineWidth, tree.PrettyAlignAndDeindent, tree.UpperCase) // Get index usage stats and assert expected results.
resp := getTableIndexStats(testHelper, "test_db1") diff --git a/pkg/ccl/spanconfigccl/spanconfigcomparedccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigcomparedccl/datadriven_test.go index e2eec1d5293d..305f1f3b40fe 100644 --- a/pkg/ccl/spanconfigccl/spanconfigcomparedccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigcomparedccl/datadriven_test.go @@ -110,7 +110,7 @@ func TestDataDriven(t *testing.T) { tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) } - spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc) + spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc, scKnobs) defer spanConfigTestCluster.Cleanup() kvSubscriber := tc.Server(0).SpanConfigKVSubscriber().(spanconfig.KVSubscriber) @@ -182,7 +182,7 @@ func TestDataDriven(t *testing.T) { switch d.Cmd { case "initialize": secondaryTenant := spanConfigTestCluster.InitializeTenant(ctx, tenantID) - secondaryTenant.Exec(`SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true`) + secondaryTenant.Exec(`SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true`) case "exec-sql": // Run under an explicit transaction -- we rely on having a @@ -199,8 +199,8 @@ func TestDataDriven(t *testing.T) { case "reconcile": tsBeforeReconcilerStart := tenant.Clock().Now() go func() { - err := tenant.Reconciler().Reconcile(ctx, hlc.Timestamp{}, func(checkpoint hlc.Timestamp) error { - tenant.Checkpoint(checkpoint) + err := tenant.Reconciler().Reconcile(ctx, hlc.Timestamp{} /* startTS */, func() error { + tenant.RecordCheckpoint() return nil }) require.NoError(t, err) diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go index b70beff926e8..90bed1d76335 100644 --- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go @@ -103,7 +103,7 @@ func TestDataDriven(t *testing.T) { tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) } - spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc) + spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc, scKnobs) defer spanConfigTestCluster.Cleanup() systemTenant := spanConfigTestCluster.InitializeTenant(ctx, roachpb.SystemTenantID) @@ -128,7 +128,7 @@ func TestDataDriven(t *testing.T) { switch d.Cmd { case "initialize": secondaryTenant := spanConfigTestCluster.InitializeTenant(ctx, tenantID) - secondaryTenant.Exec(`SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true`) + secondaryTenant.Exec(`SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true`) case "exec-sql": // Run under an explicit transaction -- we rely on having a @@ -145,8 +145,8 @@ func TestDataDriven(t *testing.T) { case "reconcile": tsBeforeReconcilerStart := tenant.Clock().Now() go func() { - err := tenant.Reconciler().Reconcile(ctx, hlc.Timestamp{}, func(checkpoint hlc.Timestamp) error { - tenant.Checkpoint(checkpoint) + err := tenant.Reconciler().Reconcile(ctx, hlc.Timestamp{} /* startTS */, func() error { + tenant.RecordCheckpoint() return nil }) require.NoError(t, err) diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go index f61d89ba3c4f..4ac3898895e9 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go +++ 
b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go @@ -88,13 +88,13 @@ func TestDataDriven(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc) + spanConfigTestCluster := spanconfigtestcluster.NewHandle(t, tc, scKnobs) defer spanConfigTestCluster.Cleanup() var tenant *spanconfigtestcluster.Tenant if strings.Contains(path, "tenant") { tenant = spanConfigTestCluster.InitializeTenant(ctx, roachpb.MakeTenantID(10)) - tenant.Exec(`SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true`) + tenant.Exec(`SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true`) } else { tenant = spanConfigTestCluster.InitializeTenant(ctx, roachpb.SystemTenantID) } diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/misc b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/misc index e8e8b4bc577a..83b14c49f129 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/misc +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/testdata/tenant/misc @@ -21,21 +21,10 @@ DROP TABLE db.t1; # We should no longer see the dropped table's spans. translate database=db ---- -/Tenant/10/Table/5{6-7} ttl_seconds=1 /Tenant/10/Table/5{7-8} range default # Same as above, except this time the translation starts from the table's ID. -translate id=53 ----- - -# By now t1's descriptor should have been deleted. -translate database=db ----- -/Tenant/10/Table/5{6-7} ttl_seconds=1 -/Tenant/10/Table/5{7-8} range default - -# This no longer exists, so no span configuration should be generated. -translate id=53 +translate id=56 ---- # Mark table t2 as offline, we should still be able to generate a span @@ -51,7 +40,6 @@ translate database=db table=t2 translate database=db ---- -/Tenant/10/Table/5{6-7} ttl_seconds=1 /Tenant/10/Table/5{7-8} range default @@ -69,19 +57,21 @@ CREATE SCHEMA db.sc; CREATE TYPE db.typ AS ENUM(); ---- +translate database=db +---- +/Tenant/10/Table/5{7-8} range default + # Schema. -translate id=55 +translate id=58 ---- # Enum. -translate id=56 +translate id=59 ---- -/Tenant/10/Table/5{6-7} ttl_seconds=1 # Array type alias. -translate id=57 +translate id=60 ---- -/Tenant/10/Table/5{7-8} range default # Test that non-existent IDs do not generate span configurations either. translate id=500 diff --git a/pkg/ccl/sqlproxyccl/proxy_handler_test.go b/pkg/ccl/sqlproxyccl/proxy_handler_test.go index f3af06fa83be..4e82abc1cc22 100644 --- a/pkg/ccl/sqlproxyccl/proxy_handler_test.go +++ b/pkg/ccl/sqlproxyccl/proxy_handler_test.go @@ -61,6 +61,7 @@ const notFoundTenantID = 99 func TestLongDBName(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -88,6 +89,7 @@ func TestLongDBName(t *testing.T) { // deleted. 
func TestBackendDownRetry(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -115,6 +117,7 @@ func TestBackendDownRetry(t *testing.T) { func TestFailedConnection(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -165,6 +168,7 @@ func TestFailedConnection(t *testing.T) { func TestUnexpectedError(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -201,13 +205,16 @@ func TestUnexpectedError(t *testing.T) { func TestProxyAgainstSecureCRDB(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() defer te.Close() sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: false}) - sql.(*server.TestServer).PGServer().(*pgwire.Server).TestingSetTrustClientProvidedRemoteAddr(true) + pgs := sql.(*server.TestServer).PGServer().(*pgwire.Server) + pgs.TestingSetTrustClientProvidedRemoteAddr(true) + pgs.TestingEnableAuthLogging() defer sql.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) @@ -234,6 +241,7 @@ func TestProxyAgainstSecureCRDB(t *testing.T) { func TestProxyTLSConf(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) t.Run("insecure", func(t *testing.T) { ctx := context.Background() @@ -314,6 +322,7 @@ func TestProxyTLSConf(t *testing.T) { func TestProxyTLSClose(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) // NB: The leaktest call is an important part of this test. We're // verifying that no goroutines are leaked, despite calling Close an // underlying TCP connection (rather than the TLSConn that wraps it). @@ -323,7 +332,9 @@ func TestProxyTLSClose(t *testing.T) { defer te.Close() sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: false}) - sql.(*server.TestServer).PGServer().(*pgwire.Server).TestingSetTrustClientProvidedRemoteAddr(true) + pgs := sql.(*server.TestServer).PGServer().(*pgwire.Server) + pgs.TestingSetTrustClientProvidedRemoteAddr(true) + pgs.TestingEnableAuthLogging() defer sql.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) @@ -362,13 +373,16 @@ func TestProxyTLSClose(t *testing.T) { func TestProxyModifyRequestParams(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() defer te.Close() sql, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: false}) - sql.(*server.TestServer).PGServer().(*pgwire.Server).TestingSetTrustClientProvidedRemoteAddr(true) + pgs := sql.(*server.TestServer).PGServer().(*pgwire.Server) + pgs.TestingSetTrustClientProvidedRemoteAddr(true) + pgs.TestingEnableAuthLogging() defer sql.Stopper().Stop(ctx) // Create some user with password authn. 
@@ -415,13 +429,16 @@ func TestProxyModifyRequestParams(t *testing.T) { func TestInsecureProxy(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() defer te.Close() sql, db, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: false}) - sql.(*server.TestServer).PGServer().(*pgwire.Server).TestingSetTrustClientProvidedRemoteAddr(true) + pgs := sql.(*server.TestServer).PGServer().(*pgwire.Server) + pgs.TestingSetTrustClientProvidedRemoteAddr(true) + pgs.TestingEnableAuthLogging() defer sql.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) @@ -444,6 +461,7 @@ func TestInsecureProxy(t *testing.T) { func TestErroneousFrontend(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -469,6 +487,7 @@ func TestErroneousFrontend(t *testing.T) { func TestErroneousBackend(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -494,6 +513,7 @@ func TestErroneousBackend(t *testing.T) { func TestProxyRefuseConn(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -518,6 +538,7 @@ func TestProxyRefuseConn(t *testing.T) { func TestDenylistUpdate(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -590,6 +611,7 @@ func TestDenylistUpdate(t *testing.T) { func TestDirectoryConnect(t *testing.T) { defer leaktest.AfterTest(t)() skip.UnderDeadlockWithIssue(t, 71365) + defer log.Scope(t).Close(t) ctx := context.Background() te := newTester() @@ -733,6 +755,7 @@ func TestDirectoryConnect(t *testing.T) { func TestClusterNameAndTenantFromParams(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) ctx := context.Background() diff --git a/pkg/ccl/streamingccl/streamclient/BUILD.bazel b/pkg/ccl/streamingccl/streamclient/BUILD.bazel index 40332a352350..4069316f7ec1 100644 --- a/pkg/ccl/streamingccl/streamclient/BUILD.bazel +++ b/pkg/ccl/streamingccl/streamclient/BUILD.bazel @@ -5,12 +5,14 @@ go_library( srcs = [ "client.go", "cockroach_sinkless_replication_client.go", + "partitioned_stream_client.go", "random_stream_client.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamclient", visibility = ["//visibility:public"], deps = [ "//pkg/ccl/streamingccl", + "//pkg/ccl/streamingccl/streampb", "//pkg/keys", "//pkg/roachpb:with-mocks", "//pkg/security", @@ -20,7 +22,9 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", + "//pkg/streaming", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/protoutil", @@ -38,6 +42,7 @@ go_test( "client_test.go", "cockroach_sinkless_replication_client_test.go", "main_test.go", + "partitioned_stream_client_test.go", ], embed = [":streamclient"], deps = [ @@ -47,21 +52,27 @@ go_test( "//pkg/ccl/storageccl", "//pkg/ccl/streamingccl", "//pkg/ccl/streamingccl/streamingtest", + "//pkg/ccl/streamingccl/streampb", "//pkg/ccl/streamingccl/streamproducer", "//pkg/ccl/utilccl", + "//pkg/jobs", "//pkg/roachpb:with-mocks", "//pkg/security", "//pkg/security/securitytest", "//pkg/server", "//pkg/sql/catalog/catalogkv", + "//pkg/streaming", "//pkg/testutils/serverutils", + "//pkg/testutils/skip", "//pkg/testutils/testcluster", "//pkg/util/ctxgroup", "//pkg/util/hlc", "//pkg/util/leaktest", 
"//pkg/util/log", + "//pkg/util/protoutil", "//pkg/util/randutil", "//pkg/util/syncutil", + "//pkg/util/timeutil", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/ccl/streamingccl/streamclient/client.go b/pkg/ccl/streamingccl/streamclient/client.go index 7b15c0898a46..fa6a47e6afe8 100644 --- a/pkg/ccl/streamingccl/streamclient/client.go +++ b/pkg/ccl/streamingccl/streamclient/client.go @@ -13,6 +13,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -20,11 +21,7 @@ import ( // Note on data APIs and datatypes. As much as possible, the data that makes // sense to the source cluster (e.g. checkpoint records, or subscription token, etc) // is treated as an opaque object (e.g. []bytes) by this API. This opacity is done -// on purpuse as it abstracts the operations on the source cluster behind this API. - -// StreamID identifies a stream across both its producer and consumer. It is -// used when the consumer wishes to interact with the stream's producer. -type StreamID uint64 +// on purpose as it abstracts the operations on the source cluster behind this API. // CheckpointToken is emitted by a stream producer to encode information about // what that producer has emitted, including what spans or timestamps it might @@ -36,6 +33,10 @@ type StreamID uint64 // to subscribe to a given partition. type SubscriptionToken []byte +// CheckpointToken is an opaque identifier which can be used to represent checkpoint +// information to start a stream processor. +type CheckpointToken []byte + // Client provides a way for the stream ingestion job to consume a // specified stream. // TODO(57427): The stream client does not yet support the concept of @@ -44,7 +45,7 @@ type Client interface { // Create initializes a stream with the source, potentially reserving any // required resources, such as protected timestamps, and returns an ID which // can be used to interact with this stream in the future. - Create(ctx context.Context, tenantID roachpb.TenantID) (StreamID, error) + Create(ctx context.Context, tenantID roachpb.TenantID) (streaming.StreamID, error) // Destroy informs the source of the stream that it may terminate production // and release resources such as protected timestamps. @@ -54,22 +55,25 @@ type Client interface { // that source cluster protected timestamp _may_ be advanced up to the passed ts // (which may be zero if no progress has been made e.g. during backfill). // TODO(dt): ts -> checkpointToken. - Heartbeat(ctx context.Context, ID StreamID, consumed hlc.Timestamp) error + Heartbeat(ctx context.Context, streamID streaming.StreamID, consumed hlc.Timestamp) error // Plan returns a Topology for this stream. // TODO(dt): separate target argument from address argument. - Plan(ctx context.Context, ID StreamID) (Topology, error) + Plan(ctx context.Context, streamID streaming.StreamID) (Topology, error) // Subscribe opens and returns a subscription for the specified partition from // the specified remote address. This is used by each consumer processor to // open its subscription to its partition of a larger stream. - // TODO(dt): ts -> checkpointToken, return -> Subscription. + // TODO(dt): ts -> checkpointToken. 
Subscribe( ctx context.Context, - stream StreamID, + streamID streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, - ) (chan streamingccl.Event, chan error, error) + ) (Subscription, error) + + // Close releases all the resources used by this client. + Close() error } // Topology is a configuration of stream partitions. These are particular to a @@ -86,6 +90,29 @@ type PartitionInfo struct { SrcLocality roachpb.Locality } +// Subscription represents subscription to a replication stream partition. +// Typical usage on the call site looks like: +// +// ctxWithCancel, cancelFn := context.WithCancel(ctx) +// g := ctxgroup.WithContext(ctxWithCancel) +// sub := client.Subscribe() +// g.GoCtx(sub.Subscribe) +// g.GoCtx(processEventsAndErrors(sub.Events(), sub.Err())) +// g.Wait() +type Subscription interface { + // Subscribe starts receiving subscription events. Terminates when context + // is cancelled. It will release all resources when the function returns. + Subscribe(ctx context.Context) error + + // Events is a channel receiving streaming events. + // This channel is closed when no additional values will be sent to this channel. + Events() <-chan streamingccl.Event + + // Err is set once when Events channel closed -- must not be called before + // the channel closes. + Err() error +} + // NewStreamClient creates a new stream client based on the stream // address. func NewStreamClient(streamAddress streamingccl.StreamAddress) (Client, error) { diff --git a/pkg/ccl/streamingccl/streamclient/client_test.go b/pkg/ccl/streamingccl/streamclient/client_test.go index 40c6f2244eb6..a4ad57ebd491 100644 --- a/pkg/ccl/streamingccl/streamclient/client_test.go +++ b/pkg/ccl/streamingccl/streamclient/client_test.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -25,12 +26,14 @@ type testStreamClient struct{} var _ Client = testStreamClient{} // Create implements the Client interface. -func (sc testStreamClient) Create(ctx context.Context, target roachpb.TenantID) (StreamID, error) { - return StreamID(1), nil +func (sc testStreamClient) Create( + ctx context.Context, target roachpb.TenantID, +) (streaming.StreamID, error) { + return streaming.StreamID(1), nil } // Plan implements the Client interface. -func (sc testStreamClient) Plan(ctx context.Context, ID StreamID) (Topology, error) { +func (sc testStreamClient) Plan(ctx context.Context, ID streaming.StreamID) (Topology, error) { return Topology([]PartitionInfo{ {SrcAddr: streamingccl.PartitionAddress("test://host1")}, {SrcAddr: streamingccl.PartitionAddress("test://host2")}, @@ -38,14 +41,21 @@ func (sc testStreamClient) Plan(ctx context.Context, ID StreamID) (Topology, err } // Heartbeat implements the Client interface. -func (sc testStreamClient) Heartbeat(ctx context.Context, ID StreamID, _ hlc.Timestamp) error { +func (sc testStreamClient) Heartbeat( + ctx context.Context, ID streaming.StreamID, _ hlc.Timestamp, +) error { + return nil +} + +// Close implements the Client interface. +func (sc testStreamClient) Close() error { return nil } // Subscribe implements the Client interface. 
func (sc testStreamClient) Subscribe( - ctx context.Context, stream StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, -) (chan streamingccl.Event, chan error, error) { + ctx context.Context, stream streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, +) (Subscription, error) { sampleKV := roachpb.KeyValue{ Key: []byte("key_1"), Value: roachpb.Value{ @@ -59,13 +69,37 @@ func (sc testStreamClient) Subscribe( events <- streamingccl.MakeCheckpointEvent(hlc.Timestamp{WallTime: 100}) close(events) - return events, nil, nil + return &testStreamSubscription{ + eventCh: events, + }, nil +} + +type testStreamSubscription struct { + eventCh chan streamingccl.Event +} + +// Subscribe implements the Subscription interface. +func (t testStreamSubscription) Subscribe(ctx context.Context) error { + return nil +} + +// Events implements the Subscription interface. +func (t testStreamSubscription) Events() <-chan streamingccl.Event { + return t.eventCh +} + +// Err implements the Subscription interface. +func (t testStreamSubscription) Err() error { + return nil } // ExampleClientUsage serves as documentation to indicate how a stream // client could be used. func ExampleClient() { client := testStreamClient{} + defer func() { + _ = client.Close() + }() id, err := client.Create(context.Background(), roachpb.MakeTenantID(1)) if err != nil { @@ -111,7 +145,7 @@ func ExampleClient() { for _, partition := range topology { // TODO(dt): use Subscribe helper and partition.SrcAddr - eventCh, _ /* errCh */, err := client.Subscribe(context.Background(), id, partition.SubscriptionToken, ts) + sub, err := client.Subscribe(context.Background(), id, partition.SubscriptionToken, ts) if err != nil { panic(err) } @@ -119,7 +153,7 @@ func ExampleClient() { // This example looks for the closing of the channel to terminate the test, // but an ingestion job should look for another event such as the user // cutting over to the new cluster to move to the next stage. - for event := range eventCh { + for event := range sub.Events() { switch event.Type() { case streamingccl.KVEvent: kv := event.GetKV() diff --git a/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client.go b/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client.go index 27f4edf01b1f..6e2a6ed656bb 100644 --- a/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client.go +++ b/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -36,22 +37,24 @@ func newPGWireReplicationClient(remote *url.URL) (Client, error) { return &sinklessReplicationClient{remote: remote}, nil } -// Plan implements the Client interface. +// Create implements the Client interface. func (m *sinklessReplicationClient) Create( ctx context.Context, tenantID roachpb.TenantID, -) (StreamID, error) { - return StreamID(tenantID.ToUint64()), nil +) (streaming.StreamID, error) { + return streaming.StreamID(tenantID.ToUint64()), nil } // Heartbeat implements the Client interface. 
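+// It is a no-op for the sinkless client.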
func (m *sinklessReplicationClient) Heartbeat( - ctx context.Context, streamID StreamID, complete hlc.Timestamp, + ctx context.Context, streamID streaming.StreamID, complete hlc.Timestamp, ) error { return nil } // Plan implements the Client interface. -func (m *sinklessReplicationClient) Plan(ctx context.Context, ID StreamID) (Topology, error) { +func (m *sinklessReplicationClient) Plan( + ctx context.Context, ID streaming.StreamID, +) (Topology, error) { // The core changefeed clients only have 1 partition, and it's located at the // stream address. return Topology([]PartitionInfo{ @@ -63,14 +66,19 @@ func (m *sinklessReplicationClient) Plan(ctx context.Context, ID StreamID) (Topo }), nil } -// ConsumePartition implements the Client interface. +// Close implements the Client interface. +func (m *sinklessReplicationClient) Close() error { + return nil +} + +// Subscribe implements the Client interface. func (m *sinklessReplicationClient) Subscribe( - ctx context.Context, stream StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, -) (chan streamingccl.Event, chan error, error) { + ctx context.Context, stream streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, +) (Subscription, error) { tenantToReplicate := string(spec) tenantID, err := strconv.Atoi(tenantToReplicate) if err != nil { - return nil, nil, errors.Wrap(err, "parsing tenant") + return nil, errors.Wrap(err, "parsing tenant") } streamTenantQuery := fmt.Sprintf( @@ -82,77 +90,96 @@ func (m *sinklessReplicationClient) Subscribe( db, err := gosql.Open("postgres", m.remote.String()) if err != nil { - return nil, nil, err + return nil, err } conn, err := db.Conn(ctx) if err != nil { - return nil, nil, err + return nil, err } _, err = conn.ExecContext(ctx, `SET enable_experimental_stream_replication = true`) if err != nil { - return nil, nil, err + return nil, err } rows, err := conn.QueryContext(ctx, streamTenantQuery) if err != nil { - return nil, nil, errors.Wrap(err, "creating source replication stream") + return nil, errors.Wrap(err, "creating source replication stream") } - eventCh := make(chan streamingccl.Event) - errCh := make(chan error, 1) - - go func() { - defer close(eventCh) - defer close(errCh) + sub := &sinklessReplicationSubscription{eventCh: make(chan streamingccl.Event)} + sub.receiveFn = func(ctx context.Context) error { + defer close(sub.eventCh) defer db.Close() defer rows.Close() for rows.Next() { var ignoreTopic gosql.NullString var k, v []byte if err := rows.Scan(&ignoreTopic, &k, &v); err != nil { - errCh <- err - return + sub.err = err + return err } var event streamingccl.Event if len(k) == 0 { var resolved hlc.Timestamp if err := protoutil.Unmarshal(v, &resolved); err != nil { - errCh <- err - return + sub.err = err + return err } event = streamingccl.MakeCheckpointEvent(resolved) } else { var kv roachpb.KeyValue kv.Key = k if err := protoutil.Unmarshal(v, &kv.Value); err != nil { - errCh <- err - return + sub.err = err + return err } event = streamingccl.MakeKVEvent(kv) } select { - case eventCh <- event: + case sub.eventCh <- event: case <-ctx.Done(): - errCh <- ctx.Err() - return + sub.err = err + return ctx.Err() } } if err := rows.Err(); err != nil { if errors.Is(err, driver.ErrBadConn) { select { - case eventCh <- streamingccl.MakeGenerationEvent(): + case sub.eventCh <- streamingccl.MakeGenerationEvent(): case <-ctx.Done(): - errCh <- ctx.Err() + sub.err = ctx.Err() } } else { - errCh <- err + sub.err = err } - return + return err } - }() + return nil + } + + return sub, nil 
+} + +type sinklessReplicationSubscription struct { + eventCh chan streamingccl.Event + err error + receiveFn func(ctx context.Context) error +} + +// Subscribe implements the Subscription interface. +func (s *sinklessReplicationSubscription) Subscribe(ctx context.Context) error { + return s.receiveFn(ctx) +} + +// Events implements the Subscription interface. +func (s *sinklessReplicationSubscription) Events() <-chan streamingccl.Event { + return s.eventCh +} - return eventCh, errCh, nil +// Err implements the Subscription interface. +func (s *sinklessReplicationSubscription) Err() error { + return s.err } diff --git a/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client_test.go b/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client_test.go index 8960e5840b79..5f1e1517ac3f 100644 --- a/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client_test.go +++ b/pkg/ccl/streamingccl/streamclient/cockroach_sinkless_replication_client_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamingtest" _ "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamproducer" // Ensure we can start replication stream. "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" @@ -29,23 +30,19 @@ import ( type channelFeedSource struct { t *testing.T cancelIngestion context.CancelFunc - eventCh chan streamingccl.Event - errCh chan error + subscription Subscription } var _ streamingtest.FeedSource = (*channelFeedSource)(nil) // Next implements the streamingtest.FeedSource interface. func (f *channelFeedSource) Next() (streamingccl.Event, bool) { - // First check for any errors. - select { - case err := <-f.errCh: - require.NoError(f.t, err) + event, haveMoreRows := <-f.subscription.Events() + if !haveMoreRows { + // Err is set after Events channel is closed. 
+ require.NoError(f.t, f.subscription.Err()) return nil, false - default: } - - event, haveMoreRows := <-f.eventCh return event, haveMoreRows } @@ -74,6 +71,9 @@ INSERT INTO d.t2 VALUES (2); t1 := catalogkv.TestingGetTableDescriptor(h.SysServer.DB(), h.Tenant.Codec, "d", "t1") client := &sinklessReplicationClient{remote: &h.PGUrl} + defer func() { + require.NoError(t, client.Close()) + }() id, err := client.Create(ctx, h.Tenant.ID) require.NoError(t, err) @@ -91,11 +91,13 @@ INSERT INTO d.t2 VALUES (2); t.Run("replicate_existing_tenant", func(t *testing.T) { clientCtx, cancelIngestion := context.WithCancel(ctx) - eventCh, errCh, err := client.Subscribe(clientCtx, id, token, startTime) + sub, err := client.Subscribe(clientCtx, id, token, startTime) require.NoError(t, err) - feedSource := &channelFeedSource{cancelIngestion: cancelIngestion, eventCh: eventCh, errCh: errCh} + feedSource := &channelFeedSource{cancelIngestion: cancelIngestion, subscription: sub} feed := streamingtest.MakeReplicationFeed(t, feedSource) + cg := ctxgroup.WithContext(clientCtx) + cg.GoCtx(sub.Subscribe) // We should observe 2 versions of this key: one with ("привет", "world"), and a later // version ("привет", "мир") expected := streamingtest.EncodeKV(t, h.Tenant.Codec, t1, 42, "привет", "world") @@ -108,15 +110,18 @@ INSERT INTO d.t2 VALUES (2); feed.ObserveResolved(ctx, secondObserved.Value.Timestamp) cancelIngestion() + require.Error(t, cg.Wait(), "context canceled") }) t.Run("stream-address-disconnects", func(t *testing.T) { clientCtx, cancelIngestion := context.WithCancel(ctx) - eventCh, errCh, err := client.Subscribe(clientCtx, id, token, startTime) + sub, err := client.Subscribe(clientCtx, id, token, startTime) require.NoError(t, err) - feedSource := &channelFeedSource{eventCh: eventCh, errCh: errCh} + feedSource := &channelFeedSource{subscription: sub} feed := streamingtest.MakeReplicationFeed(t, feedSource) + cg := ctxgroup.WithContext(ctx) + cg.GoCtx(sub.Subscribe) h.SysServer.Stopper().Stop(clientCtx) require.True(t, feed.ObserveGeneration(clientCtx)) diff --git a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client.go b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client.go new file mode 100644 index 000000000000..4bbbc2638f3d --- /dev/null +++ b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client.go @@ -0,0 +1,269 @@ +// Copyright 2021 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package streamclient + +import ( + "context" + gosql "database/sql" + "net/url" + + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/streaming" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +type partitionedStreamClient struct { + db *gosql.DB // DB handle to the source cluster +} + +func newPartitionedStreamClient(remote *url.URL) (*partitionedStreamClient, error) { + db, err := gosql.Open("postgres", remote.String()) + if err != nil { + return nil, err + } + return &partitionedStreamClient{db: db}, nil +} + +var _ Client = &partitionedStreamClient{} + +// Create implements Client interface. +func (p *partitionedStreamClient) Create( + ctx context.Context, tenantID roachpb.TenantID, +) (streaming.StreamID, error) { + streamID := streaming.InvalidStreamID + + conn, err := p.db.Conn(ctx) + if err != nil { + return streamID, err + } + defer func() { + _ = conn.Close() + }() + + row := conn.QueryRowContext(ctx, `SELECT crdb_internal.start_replication_stream($1)`, tenantID.ToUint64()) + if row.Err() != nil { + return streamID, errors.Wrapf(row.Err(), "Error in creating replication stream for tenant %s", tenantID.String()) + } + + err = row.Scan(&streamID) + return streamID, err +} + +// Heartbeat implements Client interface. +func (p *partitionedStreamClient) Heartbeat( + ctx context.Context, streamID streaming.StreamID, consumed hlc.Timestamp, +) error { + conn, err := p.db.Conn(ctx) + if err != nil { + return err + } + defer func() { + _ = conn.Close() + }() + + row := conn.QueryRowContext(ctx, + `SELECT crdb_internal.replication_stream_progress($1, $2)`, streamID, consumed.String()) + if row.Err() != nil { + return errors.Wrapf(row.Err(), "Error in sending heartbeats to replication stream %d", streamID) + } + + var rawStatus []byte + if err := row.Scan(&rawStatus); err != nil { + return err + } + var status streampb.StreamReplicationStatus + if err := protoutil.Unmarshal(rawStatus, &status); err != nil { + return err + } + // TODO(casper): add observability for stream protected timestamp + if status.StreamStatus != streampb.StreamReplicationStatus_STREAM_ACTIVE { + return errors.Errorf("Replication stream %d is not running, status is %s", streamID, status.StreamStatus.String()) + } + return nil +} + +// Plan implements Client interface. 
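+// It fetches the stream spec via crdb_internal.replication_stream_spec and
+// converts each partition of the returned spec into a PartitionInfo.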
+func (p *partitionedStreamClient) Plan( + ctx context.Context, streamID streaming.StreamID, +) (Topology, error) { + conn, err := p.db.Conn(ctx) + if err != nil { + return nil, err + } + defer func() { + _ = conn.Close() + }() + + row := conn.QueryRowContext(ctx, `SELECT crdb_internal.replication_stream_spec($1)`, streamID) + if row.Err() != nil { + return nil, errors.Wrap(row.Err(), "Error in planning a replication stream") + } + + var rawSpec []byte + if err = row.Scan(&rawSpec); err != nil { + return nil, err + } + var spec streampb.ReplicationStreamSpec + if err := protoutil.Unmarshal(rawSpec, &spec); err != nil { + return nil, err + } + + topology := Topology{} + for _, p := range spec.Partitions { + rawSpec, err := protoutil.Marshal(p.PartitionSpec) + if err != nil { + return nil, err + } + topology = append(topology, PartitionInfo{ + ID: p.NodeID.String(), // how do determine partition ID? + SubscriptionToken: SubscriptionToken(rawSpec), + SrcInstanceID: int(p.NodeID), + SrcAddr: streamingccl.PartitionAddress(p.SQLAddress.String()), + SrcLocality: p.Locality, + }) + } + return topology, nil +} + +// Close implements Client interface. +func (p *partitionedStreamClient) Close() error { + return p.db.Close() +} + +// Subscribe implements Client interface. +func (p *partitionedStreamClient) Subscribe( + ctx context.Context, stream streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, +) (Subscription, error) { + sps := streampb.StreamPartitionSpec{} + if err := protoutil.Unmarshal(spec, &sps); err != nil { + return nil, err + } + sps.StartFrom = checkpoint + + specBytes, err := protoutil.Marshal(&sps) + if err != nil { + return nil, err + } + + return &partitionedStreamSubscription{ + eventsChan: make(chan streamingccl.Event), + db: p.db, + specBytes: specBytes, + streamID: stream, + }, nil +} + +type partitionedStreamSubscription struct { + eventsChan chan streamingccl.Event + err error + db *gosql.DB + + streamEvent *streampb.StreamEvent + specBytes []byte + streamID streaming.StreamID +} + +var _ Subscription = (*partitionedStreamSubscription)(nil) + +// parseEvent parses next event from the batch of events inside streampb.StreamEvent. +func parseEvent(streamEvent *streampb.StreamEvent) streamingccl.Event { + if streamEvent == nil { + return nil + } + + if streamEvent.Checkpoint != nil { + event := streamingccl.MakeCheckpointEvent(streamEvent.Checkpoint.Spans[0].Timestamp) + streamEvent.Checkpoint = nil + return event + } + if streamEvent.Batch != nil { + event := streamingccl.MakeKVEvent(streamEvent.Batch.KeyValues[0]) + streamEvent.Batch.KeyValues = streamEvent.Batch.KeyValues[1:] + if len(streamEvent.Batch.KeyValues) == 0 { + streamEvent.Batch = nil + } + return event + } + return nil +} + +// Subscribe implements the Subscription interface. +func (p *partitionedStreamSubscription) Subscribe(ctx context.Context) error { + defer close(p.eventsChan) + conn, err := p.db.Conn(ctx) + if err != nil { + return err + } + defer func() { + _ = conn.Close() + }() + + _, err = conn.ExecContext(ctx, `SET avoid_buffering = true`) + if err != nil { + return err + } + rows, err := conn.QueryContext(ctx, `SELECT * FROM crdb_internal.stream_partition($1, $2)`, + p.streamID, p.specBytes) + if err != nil { + return err + } + defer rows.Close() + + // Get the next event from the cursor. 
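+	// Any key/values still buffered in the previously scanned StreamEvent are
+	// drained through parseEvent before the cursor is advanced and the next
+	// StreamEvent is unmarshaled.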
+ getNextEvent := func() (streamingccl.Event, error) { + if e := parseEvent(p.streamEvent); e != nil { + return e, nil + } + + if !rows.Next() { + if err := rows.Err(); err != nil { + return nil, err + } + return nil, nil + } + var data []byte + if err := rows.Scan(&data); err != nil { + return nil, err + } + var streamEvent streampb.StreamEvent + if err := protoutil.Unmarshal(data, &streamEvent); err != nil { + return nil, err + } + p.streamEvent = &streamEvent + return parseEvent(p.streamEvent), nil + } + + for { + event, err := getNextEvent() + if err != nil { + p.err = err + return err + } + select { + case p.eventsChan <- event: + case <-ctx.Done(): + p.err = err + return ctx.Err() + } + } +} + +// Events implements the Subscription interface. +func (p *partitionedStreamSubscription) Events() <-chan streamingccl.Event { + return p.eventsChan +} + +// Err implements the Subscription interface. +func (p *partitionedStreamSubscription) Err() error { + return p.err +} diff --git a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go new file mode 100644 index 000000000000..0f7fba1f5ef2 --- /dev/null +++ b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go @@ -0,0 +1,166 @@ +// Copyright 2021 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package streamclient + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamingtest" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/streaming" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +type subscriptionFeedSource struct { + sub Subscription +} + +var _ streamingtest.FeedSource = (*subscriptionFeedSource)(nil) + +// Next implements the streamingtest.FeedSource interface. +func (f *subscriptionFeedSource) Next() (streamingccl.Event, bool) { + event, hasMore := <-f.sub.Events() + return event, hasMore +} + +// Close implements the streamingtest.FeedSource interface. 
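+// It is a no-op; the subscription is torn down by cancelling the context in
+// which it runs.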
+func (f *subscriptionFeedSource) Close(ctx context.Context) {} + +func TestPartitionedStreamReplicationClient(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + skip.UnderRace(t, "partitionedStreamClient can't work under race") + + h, cleanup := streamingtest.NewReplicationHelper(t, base.TestServerArgs{ + Knobs: base.TestingKnobs{ + JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), + }, + }) + defer cleanup() + + ctx := context.Background() + // Makes sure source cluster producer job does not time out within test timeout + h.SysDB.Exec(t, "SET CLUSTER SETTING stream_replication.job_liveness_timeout = '500s'") + h.Tenant.SQL.Exec(t, ` +CREATE DATABASE d; +CREATE TABLE d.t1(i int primary key, a string, b string); +CREATE TABLE d.t2(i int primary key); +INSERT INTO d.t1 (i) VALUES (42); +INSERT INTO d.t2 VALUES (2); +`) + + client, err := newPartitionedStreamClient(&h.PGUrl) + defer func() { + require.NoError(t, client.Close()) + }() + require.NoError(t, err) + expectStreamState := func(streamID streaming.StreamID, status jobs.Status) { + h.SysDB.CheckQueryResultsRetry(t, fmt.Sprintf("SELECT status FROM system.jobs WHERE id = %d", streamID), + [][]string{{string(status)}}) + } + + id, err := client.Create(ctx, h.Tenant.ID) + require.NoError(t, err) + // We can create multiple replication streams for the same tenant. + _, err = client.Create(ctx, h.Tenant.ID) + require.NoError(t, err) + + top, err := client.Plan(ctx, id) + require.NoError(t, err) + require.Equal(t, 1, len(top)) + // Plan for a non-existent stream + _, err = client.Plan(ctx, 999) + require.Errorf(t, err, "Replication stream %d not found", 999) + + expectStreamState(id, jobs.StatusRunning) + require.NoError(t, client.Heartbeat(ctx, id, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()})) + + // Pause the underlying producer job of the replication stream + h.SysDB.Exec(t, `PAUSE JOB $1`, id) + expectStreamState(id, jobs.StatusPaused) + require.Errorf(t, client.Heartbeat(ctx, id, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}), + "Replication stream %d is not running, status is STREAM_PAUSED", id) + + // Cancel the underlying producer job of the replication stream + h.SysDB.Exec(t, `CANCEL JOB $1`, id) + expectStreamState(id, jobs.StatusCanceled) + require.Errorf(t, client.Heartbeat(ctx, id, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}), + "Replication stream %d is not running, status is STREAM_INACTIVE", id) + + // Non-existent stream is not active in the source cluster. + require.Errorf(t, client.Heartbeat(ctx, 999, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}), + "Replication stream %d is not running, status is STREAM_INACTIVE", 999) + + // Testing client.Subscribe() + makePartitionSpec := func(tables ...string) *streampb.StreamPartitionSpec { + var spans []roachpb.Span + for _, table := range tables { + desc := catalogkv.TestingGetTableDescriptor( + h.SysServer.DB(), h.Tenant.Codec, "d", table) + spans = append(spans, desc.PrimaryIndexSpan(h.Tenant.Codec)) + } + + return &streampb.StreamPartitionSpec{ + Spans: spans, + Config: streampb.StreamPartitionSpec_ExecutionConfig{ + MinCheckpointFrequency: 10 * time.Millisecond, + }, + } + } + + encodeSpec := func(tables ...string) []byte { + opaqueSpec, err := protoutil.Marshal(makePartitionSpec(tables...)) + require.NoError(t, err) + return opaqueSpec + } + + // Ignore table t2 and only subscribe to the changes to table t1. 
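+	// Subscribe only prepares the subscription; events start flowing once
+	// sub.Subscribe is run in the ctxgroup goroutine below.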
+ sub, err := client.Subscribe(ctx, id, encodeSpec("t1"), hlc.Timestamp{}) + require.NoError(t, err) + + rf := streamingtest.MakeReplicationFeed(t, &subscriptionFeedSource{sub: sub}) + t1Descr := catalogkv.TestingGetTableDescriptor(h.SysServer.DB(), h.Tenant.Codec, "d", "t1") + + ctxWithCancel, cancelFn := context.WithCancel(ctx) + cg := ctxgroup.WithContext(ctxWithCancel) + cg.GoCtx(sub.Subscribe) + // Observe the existing single row in t1. + expected := streamingtest.EncodeKV(t, h.Tenant.Codec, t1Descr, 42) + firstObserved := rf.ObserveKey(ctx, expected.Key) + require.Equal(t, expected.Value.RawBytes, firstObserved.Value.RawBytes) + rf.ObserveResolved(ctx, firstObserved.Value.Timestamp) + + // Updates the existing row. + h.Tenant.SQL.Exec(t, `UPDATE d.t1 SET b = 'world' WHERE i = 42`) + expected = streamingtest.EncodeKV(t, h.Tenant.Codec, t1Descr, 42, nil, "world") + + // Observe its changes. + secondObserved := rf.ObserveKey(ctx, expected.Key) + require.Equal(t, expected.Value.RawBytes, secondObserved.Value.RawBytes) + require.True(t, firstObserved.Value.Timestamp.Less(secondObserved.Value.Timestamp)) + + // Test if Subscribe can react to cancellation signal. + cancelFn() + require.Error(t, cg.Wait(), "context canceled") +} diff --git a/pkg/ccl/streamingccl/streamclient/random_stream_client.go b/pkg/ccl/streamingccl/streamclient/random_stream_client.go index 4d5cc4afa0c1..6568dc17d055 100644 --- a/pkg/ccl/streamingccl/streamclient/random_stream_client.go +++ b/pkg/ccl/streamingccl/streamclient/random_stream_client.go @@ -26,7 +26,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -224,7 +226,7 @@ func (m *randomStreamClient) getNextTableID() int { } // Plan implements the Client interface. -func (m *randomStreamClient) Plan(ctx context.Context, id StreamID) (Topology, error) { +func (m *randomStreamClient) Plan(ctx context.Context, id streaming.StreamID) (Topology, error) { topology := make(Topology, 0, m.config.numPartitions) log.Infof(ctx, "planning random stream for tenant %d", m.config.tenantID) @@ -247,14 +249,16 @@ func (m *randomStreamClient) Plan(ctx context.Context, id StreamID) (Topology, e // Create implements the Client interface. func (m *randomStreamClient) Create( ctx context.Context, target roachpb.TenantID, -) (StreamID, error) { +) (streaming.StreamID, error) { log.Infof(ctx, "creating random stream for tenant %d", target.ToUint64()) m.config.tenantID = target - return StreamID(target.ToUint64()), nil + return streaming.StreamID(target.ToUint64()), nil } // Heartbeat implements the Client interface. -func (m *randomStreamClient) Heartbeat(ctx context.Context, ID StreamID, _ hlc.Timestamp) error { +func (m *randomStreamClient) Heartbeat( + ctx context.Context, ID streaming.StreamID, _ hlc.Timestamp, +) error { return nil } @@ -304,17 +308,22 @@ func (m *randomStreamClient) getDescriptorAndNamespaceKVForTableID( return testTable, []roachpb.KeyValue{namespaceKV, descKV}, nil } -// ConsumePartition implements the Client interface. +// Close implements the Client interface. 
+func (m *randomStreamClient) Close() error { + return nil +} + +// Subscribe implements the Client interface. func (m *randomStreamClient) Subscribe( - ctx context.Context, stream StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, -) (chan streamingccl.Event, chan error, error) { + ctx context.Context, stream streaming.StreamID, spec SubscriptionToken, checkpoint hlc.Timestamp, +) (Subscription, error) { partitionURL, err := url.Parse(string(spec)) if err != nil { - return nil, nil, err + return nil, err } config, err := parseRandomStreamConfig(partitionURL) if err != nil { - return nil, nil, err + return nil, err } eventCh := make(chan streamingccl.Event) @@ -327,14 +336,14 @@ func (m *randomStreamClient) Subscribe( var partitionTableID int partitionTableID, err = strconv.Atoi(partitionURL.Host) if err != nil { - return nil, nil, err + return nil, err } log.Infof(ctx, "producing kvs for metadata for table %d for tenant %d based on %q", partitionTableID, config.tenantID, spec) tableDesc, systemKVs, err := m.getDescriptorAndNamespaceKVForTableID(config, descpb.ID(partitionTableID)) if err != nil { - return nil, nil, err + return nil, err } - go func() { + receiveFn := func(ctx context.Context) error { defer close(eventCh) // rand is not thread safe, so create a random source for each partition. @@ -379,7 +388,7 @@ func (m *randomStreamClient) Subscribe( select { case eventCh <- event: case <-ctx.Done(): - return + return ctx.Err() } func() { @@ -397,9 +406,32 @@ func (m *randomStreamClient) Subscribe( time.Sleep(kvInterval) } - }() + } + + return &randomStreamSubscription{ + receiveFn: receiveFn, + eventCh: eventCh, + }, nil +} + +type randomStreamSubscription struct { + receiveFn func(ctx context.Context) error + eventCh chan streamingccl.Event +} - return eventCh, nil, nil +// Subscribe implements the Subscription interface. +func (r *randomStreamSubscription) Subscribe(ctx context.Context) error { + return r.receiveFn(ctx) +} + +// Events implements the Subscription interface. +func (r *randomStreamSubscription) Events() <-chan streamingccl.Event { + return r.eventCh +} + +// Err implements the Subscription interface. +func (r *randomStreamSubscription) Err() error { + return nil } func rekey(tenantID roachpb.TenantID, k roachpb.Key) roachpb.Key { @@ -427,7 +459,7 @@ func makeRandomKey( var colIDToRowIndex catalog.TableColMap colIDToRowIndex.Set(index.GetKeyColumnID(0), 0) - keyPrefix := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc, index.GetID()) + keyPrefix := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc.GetID(), index.GetID()) k, _, err := rowenc.EncodeIndexKey(tableDesc, index, colIDToRowIndex, tree.Datums{keyDatum}, keyPrefix) if err != nil { panic(err) @@ -438,8 +470,8 @@ func makeRandomKey( // Create a value holding a random integer. 
valueDatum := tree.NewDInt(tree.DInt(r.Intn(config.valueRange))) - valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.Columns[1].ID, valueDatum, []byte(nil)) + valueBuf, err := valueside.Encode( + []byte(nil), valueside.MakeColumnIDDelta(0, tableDesc.Columns[1].ID), valueDatum, []byte(nil)) if err != nil { panic(err) } diff --git a/pkg/ccl/streamingccl/streamingest/BUILD.bazel b/pkg/ccl/streamingccl/streamingest/BUILD.bazel index 30a5f0e083ee..f6271f3e1eb0 100644 --- a/pkg/ccl/streamingccl/streamingest/BUILD.bazel +++ b/pkg/ccl/streamingccl/streamingest/BUILD.bazel @@ -39,6 +39,7 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/storage", + "//pkg/streaming", "//pkg/util/ctxgroup", "//pkg/util/hlc", "//pkg/util/log", @@ -91,6 +92,7 @@ go_test( "//pkg/sql/execinfrapb", "//pkg/sql/sem/tree", "//pkg/storage", + "//pkg/streaming", "//pkg/testutils", "//pkg/testutils/distsqlutils", "//pkg/testutils/jobutils", diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go index cace59a73015..f32998536df3 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go @@ -47,7 +47,7 @@ type streamIngestionFrontier struct { flowCtx *execinfra.FlowCtx spec execinfrapb.StreamIngestionFrontierSpec - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // input returns rows from one or more streamIngestion processors. input execinfra.RowSource diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go index a15643c2857c..c7516fe33f29 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go @@ -168,6 +168,9 @@ func TestStreamIngestionFrontierProcessor(t *testing.T) { sip.forceClientForTests = &mockStreamClient{ partitionEvents: tc.events, } + defer func() { + require.NoError(t, sip.forceClientForTests.Close()) + }() // Create a frontier processor. var frontierSpec execinfrapb.StreamIngestionFrontierSpec diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go index f259d8c3929a..7c12854839c6 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -110,6 +111,9 @@ type streamIngestionProcessor struct { // cutover. cutoverCh chan struct{} + // cg is used to receive the subscription of events from the source cluster. + cg ctxgroup.Group + // closePoller is used to shutdown the poller that checks the job for a // cutover signal. closePoller chan struct{} @@ -214,8 +218,8 @@ func (sip *streamIngestionProcessor) Start(ctx context.Context) { log.Infof(ctx, "starting %d stream partitions", len(sip.spec.PartitionIds)) // Initialize the event streams. 
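+	// Each partition gets its own Subscription; the per-partition Subscribe
+	// goroutines are tracked by sip.cg and their events are merged into a
+	// single channel below.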
- eventChs := make(map[string]chan streamingccl.Event) - errChs := make(map[string]chan error) + subscriptions := make(map[string]streamclient.Subscription) + sip.cg = ctxgroup.WithContext(ctx) for i := range sip.spec.PartitionIds { id := sip.spec.PartitionIds[i] spec := streamclient.SubscriptionToken(sip.spec.PartitionSpecs[i]) @@ -232,15 +236,15 @@ func (sip *streamIngestionProcessor) Start(ctx context.Context) { } } - eventCh, errCh, err := streamClient.Subscribe(ctx, streamclient.StreamID(sip.spec.StreamID), spec, sip.spec.StartTime) + sub, err := streamClient.Subscribe(ctx, streaming.StreamID(sip.spec.StreamID), spec, sip.spec.StartTime) + subscriptions[id] = sub if err != nil { sip.MoveToDraining(errors.Wrapf(err, "consuming partition %v", addr)) return } - eventChs[id] = eventCh - errChs[id] = errCh + sip.cg.GoCtx(sub.Subscribe) } - sip.eventCh = sip.merge(ctx, eventChs, errChs) + sip.eventCh = sip.merge(ctx, subscriptions) } // Next is part of the RowSource interface. @@ -370,9 +374,7 @@ func (sip *streamIngestionProcessor) checkForCutoverSignal( // merge takes events from all the streams and merges them into a single // channel. func (sip *streamIngestionProcessor) merge( - ctx context.Context, - partitionStreams map[string]chan streamingccl.Event, - errorStreams map[string]chan error, + ctx context.Context, subscriptions map[string]streamclient.Subscription, ) chan partitionEvent { merged := make(chan partitionEvent) @@ -386,20 +388,16 @@ func (sip *streamIngestionProcessor) merge( } } - for partition, eventCh := range partitionStreams { + for partition, sub := range subscriptions { partition := partition - eventCh := eventCh - errCh, ok := errorStreams[partition] - if !ok { - log.Fatalf(ctx, "could not find error channel for partition %q", partition) - } + sub := sub g.GoCtx(func(ctx context.Context) error { ctxDone := ctx.Done() for { select { - case event, ok := <-eventCh: + case event, ok := <-sub.Events(): if !ok { - return nil + return sub.Err() } pe := partitionEvent{ @@ -412,8 +410,6 @@ func (sip *streamIngestionProcessor) merge( case <-ctxDone: return ctx.Err() } - case err := <-errCh: - return err case <-ctxDone: return ctx.Err() } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go index 847063515ff1..0f37a72e92ae 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -58,35 +59,54 @@ var _ streamclient.Client = &mockStreamClient{} // Create implements the Client interface. func (m *mockStreamClient) Create( ctx context.Context, target roachpb.TenantID, -) (streamclient.StreamID, error) { +) (streaming.StreamID, error) { panic("unimplemented") } // Heartbeat implements the Client interface. func (m *mockStreamClient) Heartbeat( - ctx context.Context, ID streamclient.StreamID, _ hlc.Timestamp, + ctx context.Context, ID streaming.StreamID, _ hlc.Timestamp, ) error { panic("unimplemented") } // Plan implements the Client interface. 
func (m *mockStreamClient) Plan( - ctx context.Context, _ streamclient.StreamID, + ctx context.Context, _ streaming.StreamID, ) (streamclient.Topology, error) { panic("unimplemented mock method") } -// ConsumePartition implements the Client interface. +type mockSubscription struct { + eventsCh chan streamingccl.Event +} + +// Subscribe implements the Subscription interface. +func (m *mockSubscription) Subscribe(ctx context.Context) error { + return nil +} + +// Events implements the Subscription interface. +func (m *mockSubscription) Events() <-chan streamingccl.Event { + return m.eventsCh +} + +// Err implements the Subscription interface. +func (m *mockSubscription) Err() error { + return nil +} + +// Subscribe implements the Client interface. func (m *mockStreamClient) Subscribe( ctx context.Context, - stream streamclient.StreamID, + stream streaming.StreamID, spec streamclient.SubscriptionToken, checkpoint hlc.Timestamp, -) (chan streamingccl.Event, chan error, error) { +) (streamclient.Subscription, error) { var events []streamingccl.Event var ok bool if events, ok = m.partitionEvents[string(spec)]; !ok { - return nil, nil, errors.Newf("no events found for paritition %s", string(spec)) + return nil, errors.Newf("no events found for paritition %s", string(spec)) } log.Infof(ctx, "%q emitting %d events", string(spec), len(events)) @@ -97,8 +117,12 @@ func (m *mockStreamClient) Subscribe( } log.Infof(ctx, "%q done emitting %d events", string(spec), len(events)) close(eventCh) + return &mockSubscription{eventsCh: eventCh}, nil +} - return eventCh, nil, nil +// Close implements the Client interface. +func (m *mockStreamClient) Close() error { + return nil } // errorStreamClient always returns an error when consuming a partition. @@ -109,11 +133,11 @@ var _ streamclient.Client = &errorStreamClient{} // ConsumePartition implements the streamclient.Client interface. 
func (m *errorStreamClient) Subscribe( ctx context.Context, - stream streamclient.StreamID, + stream streaming.StreamID, spec streamclient.SubscriptionToken, checkpoint hlc.Timestamp, -) (chan streamingccl.Event, chan error, error) { - return nil, nil, errors.New("this client always returns an error") +) (streamclient.Subscription, error) { + return nil, errors.New("this client always returns an error") } func TestStreamIngestionProcessor(t *testing.T) { @@ -212,6 +236,9 @@ func TestStreamIngestionProcessor(t *testing.T) { }} sip, out, err := getStreamIngestionProcessor(ctx, t, registry, kvDB, "randomgen://test/", partitions, startTime, nil /* interceptEvents */, mockClient, streamingTestingKnob) + defer func() { + require.NoError(t, sip.forceClientForTests.Close()) + }() require.NoError(t, err) var wg sync.WaitGroup @@ -464,6 +491,9 @@ func runStreamIngestionProcessor( if !out.ProducerClosed() { t.Fatalf("output RowReceiver not closed") } + if err := sip.forceClientForTests.Close(); err != nil { + return nil, err + } return out, err } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go index 9569244ae8b1..fa134541d683 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go @@ -88,6 +88,9 @@ func TestStreamIngestionJobWithRandomClient(t *testing.T) { streamValidator := newStreamClientValidator() registerValidator := registerValidatorWithClient(streamValidator) client := streamclient.GetRandomStreamClientSingletonForTesting() + defer func() { + require.NoError(t, client.Close()) + }() interceptEvents := []streamclient.InterceptFn{ completeJobAfterCheckpoints, registerValidator, diff --git a/pkg/ccl/streamingccl/streampb/BUILD.bazel b/pkg/ccl/streamingccl/streampb/BUILD.bazel index 2333917bfed7..dd81eed5032a 100644 --- a/pkg/ccl/streamingccl/streampb/BUILD.bazel +++ b/pkg/ccl/streamingccl/streampb/BUILD.bazel @@ -9,6 +9,7 @@ proto_library( visibility = ["//visibility:public"], deps = [ "//pkg/roachpb:roachpb_proto", + "//pkg/util:util_proto", "//pkg/util/hlc:hlc_proto", "@com_github_gogo_protobuf//gogoproto:gogo_proto", "@com_google_protobuf//:duration_proto", @@ -23,6 +24,7 @@ go_proto_library( visibility = ["//visibility:public"], deps = [ "//pkg/roachpb:with-mocks", + "//pkg/util", "//pkg/util/hlc", "@com_github_gogo_protobuf//gogoproto", ], @@ -30,6 +32,7 @@ go_proto_library( go_library( name = "streampb", + srcs = ["empty.go"], embed = [":streampb_go_proto"], importpath = "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb", visibility = ["//visibility:public"], diff --git a/pkg/ccl/streamingccl/streampb/empty.go b/pkg/ccl/streamingccl/streampb/empty.go new file mode 100644 index 000000000000..1e596c9ef362 --- /dev/null +++ b/pkg/ccl/streamingccl/streampb/empty.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package streampb + +// This file is intentionally left empty. 
diff --git a/pkg/ccl/streamingccl/streampb/stream.proto b/pkg/ccl/streamingccl/streampb/stream.proto index 55934d745b65..efedd9af5e20 100644 --- a/pkg/ccl/streamingccl/streampb/stream.proto +++ b/pkg/ccl/streamingccl/streampb/stream.proto @@ -13,7 +13,9 @@ option go_package = "streampb"; import "roachpb/data.proto"; +import "roachpb/metadata.proto"; import "util/hlc/timestamp.proto"; +import "util/unresolved_addr.proto"; import "gogoproto/gogo.proto"; import "google/protobuf/duration.proto"; @@ -42,6 +44,26 @@ message StreamPartitionSpec { ExecutionConfig config = 3 [(gogoproto.nullable) = false]; } +message ReplicationStreamSpec { + message Partition { + // ID of the node this partition resides + int32 node_id = 1 [(gogoproto.customname) = "NodeID", + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.NodeID"]; + + // The SQL address of the node. + util.UnresolvedAddr sql_address = 2 [(gogoproto.nullable) = false, + (gogoproto.customname) = "SQLAddress"]; + + // Locality of the node + roachpb.Locality locality = 3 [(gogoproto.nullable) = false]; + + // The spec of the processor responsible for streaming this partition + StreamPartitionSpec partition_spec = 4 [(gogoproto.customname) = "PartitionSpec"]; + } + + repeated Partition partitions = 1 [(gogoproto.nullable) = false]; +} + // StreamEvent describes a replication stream event message StreamEvent { message Batch { @@ -65,3 +87,23 @@ message StreamEvent { Batch batch = 1; StreamCheckpoint checkpoint = 2; } + +message StreamReplicationStatus { + enum StreamStatus { + // Stream is running. Consumers should continue to heartbeat. + STREAM_ACTIVE = 0; + // Stream stopped running. Consumers should stop heartbeating and + // optionally start a new replication stream. + STREAM_INACTIVE = 1; + // Stream replication is paused. Consumers can resume the job and start heartbeating. + STREAM_PAUSED = 2; + // Stream status is unknown. Consumers should retry heartbeating. + UNKNOWN_STREAM_STATUS_RETRY = 4; + } + + StreamStatus stream_status = 1; + + // Current protected timestamp for spans being replicated. It is absent + // when the replication stream is 'STOPPED'. 
+ util.hlc.Timestamp protected_timestamp = 2; +} diff --git a/pkg/ccl/streamingccl/streamproducer/BUILD.bazel b/pkg/ccl/streamingccl/streamproducer/BUILD.bazel index fadd05000078..2297661dd6a6 100644 --- a/pkg/ccl/streamingccl/streamproducer/BUILD.bazel +++ b/pkg/ccl/streamingccl/streamproducer/BUILD.bazel @@ -71,6 +71,7 @@ go_test( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/jobs/jobsprotectedts", + "//pkg/keys", "//pkg/kv", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb:with-mocks", diff --git a/pkg/ccl/streamingccl/streamproducer/producer_job_test.go b/pkg/ccl/streamingccl/streamproducer/producer_job_test.go index bea7111c3a8c..89bf9dce2cd7 100644 --- a/pkg/ccl/streamingccl/streamproducer/producer_job_test.go +++ b/pkg/ccl/streamingccl/streamproducer/producer_job_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" @@ -187,14 +188,14 @@ func TestStreamReplicationProducerJob(t *testing.T) { _, err := getPTSRecord(ptsID) require.Error(t, err, "protected timestamp record does not exist") - var status jobspb.StreamReplicationStatus + var status streampb.StreamReplicationStatus require.NoError(t, source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { status, err = updateReplicationStreamProgress( ctx, timeutil.Now(), ptp, registry, streaming.StreamID(jr.JobID), hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}, txn) return err })) - require.Equal(t, jobspb.StreamReplicationStatus_STREAM_INACTIVE, status.StreamStatus) + require.Equal(t, streampb.StreamReplicationStatus_STREAM_INACTIVE, status.StreamStatus) } { // Job starts running and eventually fails after it's timed out @@ -216,7 +217,7 @@ func TestStreamReplicationProducerJob(t *testing.T) { } // Set expiration to a new time in the future - var streamStatus jobspb.StreamReplicationStatus + var streamStatus streampb.StreamReplicationStatus var err error expire := expirationTime(jr).Add(10 * time.Millisecond) require.NoError(t, source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -225,7 +226,7 @@ func TestStreamReplicationProducerJob(t *testing.T) { ptp, registry, streaming.StreamID(jr.JobID), updatedFrontier, txn) return err })) - require.Equal(t, jobspb.StreamReplicationStatus_STREAM_ACTIVE, streamStatus.StreamStatus) + require.Equal(t, streampb.StreamReplicationStatus_STREAM_ACTIVE, streamStatus.StreamStatus) require.Equal(t, updatedFrontier, *streamStatus.ProtectedTimestamp) r, err := getPTSRecord(ptsID) diff --git a/pkg/ccl/streamingccl/streamproducer/replication_manager.go b/pkg/ccl/streamingccl/streamproducer/replication_manager.go index c792778f0cfe..e96f66544bd8 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_manager.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_manager.go @@ -9,8 +9,8 @@ package streamproducer import ( + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -42,7 +42,7 @@ func (r *replicationStreamManagerImpl) StartReplicationStream( // UpdateReplicationStreamProgress implements ReplicationStreamManager interface. 
func (r *replicationStreamManagerImpl) UpdateReplicationStreamProgress( evalCtx *tree.EvalContext, streamID streaming.StreamID, frontier hlc.Timestamp, txn *kv.Txn, -) (jobspb.StreamReplicationStatus, error) { +) (streampb.StreamReplicationStatus, error) { return heartbeatReplicationStream(evalCtx, streamID, frontier, txn) } @@ -55,6 +55,13 @@ func (r *replicationStreamManagerImpl) StreamPartition( return streamPartition(evalCtx, streamID, opaqueSpec) } +// GetReplicationStreamSpec implements ReplicationStreamManager interface. +func (r *replicationStreamManagerImpl) GetReplicationStreamSpec( + evalCtx *tree.EvalContext, txn *kv.Txn, streamID streaming.StreamID, +) (*streampb.ReplicationStreamSpec, error) { + return getReplicationStreamSpec(evalCtx, txn, streamID) +} + func newReplicationStreamManagerWithPrivilegesCheck( evalCtx *tree.EvalContext, ) (streaming.ReplicationStreamManager, error) { diff --git a/pkg/ccl/streamingccl/streamproducer/replication_stream_planning.go b/pkg/ccl/streamingccl/streamproducer/replication_stream_planning.go index 7268fe9c88c5..fd6ad47ccc69 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_stream_planning.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_stream_planning.go @@ -13,9 +13,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeeddist" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" + "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql" @@ -24,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -207,6 +211,56 @@ func createReplicationStreamHook( return fn, replicationStreamHeader, nil, avoidBuffering, nil } +func getReplicationStreamSpec( + evalCtx *tree.EvalContext, txn *kv.Txn, streamID streaming.StreamID, +) (*streampb.ReplicationStreamSpec, error) { + jobExecCtx := evalCtx.JobExecContext.(sql.JobExecContext) + // Returns error if the replication stream is not active + j, err := jobExecCtx.ExecCfg().JobRegistry.LoadJob(evalCtx.Ctx(), jobspb.JobID(streamID)) + if err != nil { + return nil, errors.Wrapf(err, "Replication stream %d has error", streamID) + } + if j.Status() != jobs.StatusRunning { + return nil, errors.Errorf("Replication stream %d is not running", streamID) + } + + // Partition the spans with SQLPlanner + var noTxn *kv.Txn + dsp := jobExecCtx.DistSQLPlanner() + planCtx := dsp.NewPlanningCtx(evalCtx.Ctx(), jobExecCtx.ExtendedEvalContext(), nil /* planner */, noTxn, + true /* distribute */) + + replicatedSpans := j.Details().(jobspb.StreamReplicationDetails).Spans + spans := make([]roachpb.Span, 0, len(replicatedSpans)) + for _, span := range replicatedSpans { + spans = append(spans, *span) + } + spanPartitions, err := dsp.PartitionSpans(planCtx, spans) + if err != nil { + return nil, err + } + + res := &streampb.ReplicationStreamSpec{ + Partitions: make([]streampb.ReplicationStreamSpec_Partition, 0, len(spanPartitions)), 
+ } + for _, sp := range spanPartitions { + nodeInfo, err := dsp.GetNodeInfo(sp.Node) + if err != nil { + return nil, err + } + res.Partitions = append(res.Partitions, streampb.ReplicationStreamSpec_Partition{ + NodeID: sp.Node, + SQLAddress: nodeInfo.SQLAddress, + Locality: nodeInfo.Locality, + PartitionSpec: &streampb.StreamPartitionSpec{ + Spans: sp.Spans, + // Use default ExecutionConfig for now + }, + }) + } + return res, nil +} + func init() { sql.AddPlanHook(createReplicationStreamHook) } diff --git a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go index ead8c85b3b62..bc592fb98e7a 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamingtest" "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -287,16 +287,19 @@ func TestReplicationStreamInitialization(t *testing.T) { h, cleanup := streamingtest.NewReplicationHelper(t, serverArgs) defer cleanup() - checkStreamStatus := func(t *testing.T, streamID string, expectedStreamStatus jobspb.StreamReplicationStatus_StreamStatus) { + checkStreamStatus := func(t *testing.T, streamID string, expectedStreamStatus streampb.StreamReplicationStatus_StreamStatus) { hlcTime := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()} - rows := h.SysDB.QueryStr(t, "SELECT crdb_internal.replication_stream_progress($1, $2)", streamID, hlcTime.String()) + status, rawStatus := &streampb.StreamReplicationStatus{}, make([]byte, 0) + row := h.SysDB.QueryRow(t, "SELECT crdb_internal.replication_stream_progress($1, $2)", streamID, hlcTime.String()) - expectedStatus := jobspb.StreamReplicationStatus{StreamStatus: expectedStreamStatus} + row.Scan(&rawStatus) + require.NoError(t, protoutil.Unmarshal(rawStatus, status)) + expectedStatus := streampb.StreamReplicationStatus{StreamStatus: expectedStreamStatus} // A running stream is expected to report the current protected timestamp for the replicating spans. 
- if expectedStatus.StreamStatus == jobspb.StreamReplicationStatus_STREAM_ACTIVE { - expectedStatus.ProtectedTimestamp = &hlcTime + if expectedStatus.StreamStatus == streampb.StreamReplicationStatus_STREAM_ACTIVE { + require.Equal(t, hlcTime, *status.ProtectedTimestamp) } - require.Equal(t, expectedStatus.String(), rows[0][0]) + require.Equal(t, expectedStreamStatus, status.StreamStatus) } // Makes the stream time out really soon @@ -308,7 +311,7 @@ func TestReplicationStreamInitialization(t *testing.T) { h.SysDB.CheckQueryResultsRetry(t, fmt.Sprintf("SELECT status FROM system.jobs WHERE id = %s", streamID), [][]string{{"failed"}}) - checkStreamStatus(t, streamID, jobspb.StreamReplicationStatus_STREAM_INACTIVE) + checkStreamStatus(t, streamID, streampb.StreamReplicationStatus_STREAM_INACTIVE) }) // Make sure the stream does not time out within the test timeout @@ -325,12 +328,25 @@ func TestReplicationStreamInitialization(t *testing.T) { for start, end := now, now.Add(testDuration); start.Before(end); start = start.Add(300 * time.Millisecond) { h.SysDB.CheckQueryResults(t, fmt.Sprintf("SELECT status FROM system.jobs WHERE id = %s", streamID), [][]string{{"running"}}) - checkStreamStatus(t, streamID, jobspb.StreamReplicationStatus_STREAM_ACTIVE) + checkStreamStatus(t, streamID, streampb.StreamReplicationStatus_STREAM_ACTIVE) } + + // Get a replication stream spec + spec, rawSpec := &streampb.ReplicationStreamSpec{}, make([]byte, 0) + row := h.SysDB.QueryRow(t, "SELECT crdb_internal.replication_stream_spec($1)", streamID) + row.Scan(&rawSpec) + require.NoError(t, protoutil.Unmarshal(rawSpec, spec)) + + // Ensures the processor spec tracks the tenant span + require.Equal(t, 1, len(spec.Partitions)) + require.Equal(t, 1, len(spec.Partitions[0].PartitionSpec.Spans)) + tenantPrefix := keys.MakeTenantPrefix(h.Tenant.ID) + require.Equal(t, roachpb.Span{Key: tenantPrefix, EndKey: tenantPrefix.PrefixEnd()}, + spec.Partitions[0].PartitionSpec.Spans[0]) }) t.Run("nonexistent-replication-stream-has-inactive-status", func(t *testing.T) { - checkStreamStatus(t, "123", jobspb.StreamReplicationStatus_STREAM_INACTIVE) + checkStreamStatus(t, "123", streampb.StreamReplicationStatus_STREAM_INACTIVE) }) } diff --git a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go index 3143570bbf0d..50aed1a8062e 100644 --- a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go +++ b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go @@ -13,6 +13,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" @@ -153,22 +154,22 @@ func updateReplicationStreamProgress( streamID streaming.StreamID, ts hlc.Timestamp, txn *kv.Txn, -) (status jobspb.StreamReplicationStatus, err error) { +) (status streampb.StreamReplicationStatus, err error) { const useReadLock = false err = registry.UpdateJobWithTxn(ctx, jobspb.JobID(streamID), txn, useReadLock, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { if md.Status == jobs.StatusRunning { - status.StreamStatus = jobspb.StreamReplicationStatus_STREAM_ACTIVE + status.StreamStatus = streampb.StreamReplicationStatus_STREAM_ACTIVE } else if md.Status == jobs.StatusPaused { - status.StreamStatus = jobspb.StreamReplicationStatus_STREAM_PAUSED + status.StreamStatus = 
streampb.StreamReplicationStatus_STREAM_PAUSED } else if md.Status.Terminal() { - status.StreamStatus = jobspb.StreamReplicationStatus_STREAM_INACTIVE + status.StreamStatus = streampb.StreamReplicationStatus_STREAM_INACTIVE } else { - status.StreamStatus = jobspb.StreamReplicationStatus_UNKNOWN_STREAM_STATUS_RETRY + status.StreamStatus = streampb.StreamReplicationStatus_UNKNOWN_STREAM_STATUS_RETRY } // Skip checking PTS record in cases that it might already be released - if status.StreamStatus != jobspb.StreamReplicationStatus_STREAM_ACTIVE && - status.StreamStatus != jobspb.StreamReplicationStatus_STREAM_PAUSED { + if status.StreamStatus != streampb.StreamReplicationStatus_STREAM_ACTIVE && + status.StreamStatus != streampb.StreamReplicationStatus_STREAM_PAUSED { return nil } @@ -178,7 +179,7 @@ func updateReplicationStreamProgress( return err } status.ProtectedTimestamp = &ptsRecord.Timestamp - if status.StreamStatus != jobspb.StreamReplicationStatus_STREAM_ACTIVE { + if status.StreamStatus != streampb.StreamReplicationStatus_STREAM_ACTIVE { return nil } @@ -197,7 +198,7 @@ func updateReplicationStreamProgress( }) if jobs.HasJobNotFoundError(err) || testutils.IsError(err, "not found in system.jobs table") { - status.StreamStatus = jobspb.StreamReplicationStatus_STREAM_INACTIVE + status.StreamStatus = streampb.StreamReplicationStatus_STREAM_INACTIVE err = nil } @@ -208,7 +209,7 @@ func updateReplicationStreamProgress( // record to the specified frontier. func heartbeatReplicationStream( evalCtx *tree.EvalContext, streamID streaming.StreamID, frontier hlc.Timestamp, txn *kv.Txn, -) (jobspb.StreamReplicationStatus, error) { +) (streampb.StreamReplicationStatus, error) { execConfig := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig) timeout := streamingccl.StreamReplicationJobLivenessTimeout.Get(&evalCtx.Settings.SV) diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel index 04f5cbe867b6..419db42a1281 100644 --- a/pkg/cli/BUILD.bazel +++ b/pkg/cli/BUILD.bazel @@ -208,7 +208,7 @@ go_library( "//pkg/workload/tpch", "//pkg/workload/workloadsql", "//pkg/workload/ycsb", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_cockroachdb_logtags//:logtags", diff --git a/pkg/cli/cert.go b/pkg/cli/cert.go index 2ed30820efa9..51580c3782ae 100644 --- a/pkg/cli/cert.go +++ b/pkg/cli/cert.go @@ -294,6 +294,7 @@ var certCmds = []*cobra.Command{ createNodeCertCmd, createClientCertCmd, mtCreateTenantCertCmd, + mtCreateTenantSigningCertCmd, listCertsCmd, } diff --git a/pkg/cli/debug_recover_loss_of_quorum.go b/pkg/cli/debug_recover_loss_of_quorum.go index 761606330412..7c95a932ecbc 100644 --- a/pkg/cli/debug_recover_loss_of_quorum.go +++ b/pkg/cli/debug_recover_loss_of_quorum.go @@ -280,8 +280,9 @@ Discarded live replicas: %d `, report.TotalReplicas, len(report.PlannedUpdates), report.DiscardedNonSurvivors) for _, r := range report.PlannedUpdates { _, _ = fmt.Fprintf(stderr, "Recovering range r%d:%s updating replica %s to %s. 
"+ - "Discarding replicas: %s\n", - r.RangeID, r.StartKey, r.OldReplica, r.Replica, r.DiscardedReplicas) + "Discarding available replicas: [%s], discarding dead replicas: [%s].\n", + r.RangeID, r.StartKey, r.OldReplica, r.Replica, + r.DiscardedAvailableReplicas, r.DiscardedDeadReplicas) } deadStoreMsg := fmt.Sprintf("\nDiscovered dead stores from provided files: %s", diff --git a/pkg/cli/democluster/demo_cluster.go b/pkg/cli/democluster/demo_cluster.go index 9974501527a6..9002be87bf71 100644 --- a/pkg/cli/democluster/demo_cluster.go +++ b/pkg/cli/democluster/demo_cluster.go @@ -1117,6 +1117,11 @@ func (demoCtx *Context) generateCerts(certsDir string) (err error) { if err := security.WriteTenantPair(certsDir, pair, false /* overwrite */); err != nil { return err } + if err := security.CreateTenantSigningPair( + certsDir, demoCtx.DefaultCertLifetime, false /* overwrite */, uint64(i+2), + ); err != nil { + return err + } } } return nil diff --git a/pkg/cli/doctor.go b/pkg/cli/doctor.go index 493c1b27c37e..6da8e033e819 100644 --- a/pkg/cli/doctor.go +++ b/pkg/cli/doctor.go @@ -23,7 +23,7 @@ import ( "strings" "time" - apd "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/cli/clierror" "github.com/cockroachdb/cockroach/pkg/cli/clierrorplus" "github.com/cockroachdb/cockroach/pkg/cli/clisqlclient" diff --git a/pkg/cli/gen.go b/pkg/cli/gen.go index 47c8686b526c..72811d8ad60c 100644 --- a/pkg/cli/gen.go +++ b/pkg/cli/gen.go @@ -211,8 +211,8 @@ Output the list of cluster settings known to this binary. settings.NewUpdater(&s.SV).ResetRemaining(context.Background()) var rows [][]string - for _, name := range settings.Keys() { - setting, ok := settings.Lookup(name, settings.LookupForLocalAccess) + for _, name := range settings.Keys(settings.ForSystemTenant) { + setting, ok := settings.Lookup(name, settings.LookupForLocalAccess, settings.ForSystemTenant) if !ok { panic(fmt.Sprintf("could not find setting %q", name)) } diff --git a/pkg/cli/mt.go b/pkg/cli/mt.go index 2501dc470b19..5bc4273a9fe5 100644 --- a/pkg/cli/mt.go +++ b/pkg/cli/mt.go @@ -21,6 +21,7 @@ func init() { mtCertsCmd.AddCommand( mtCreateTenantCACertCmd, mtCreateTenantCertCmd, + mtCreateTenantSigningCertCmd, ) mtCmd.AddCommand(mtCertsCmd) diff --git a/pkg/cli/mt_cert.go b/pkg/cli/mt_cert.go index e9ea0d76baaa..5b21c6e22cc1 100644 --- a/pkg/cli/mt_cert.go +++ b/pkg/cli/mt_cert.go @@ -110,3 +110,33 @@ If no server addresses are passed, then a default list containing 127.0.0.1, ::1 "failed to write tenant client certificate and key") }), } + +// A mtCreateTenantSigningCertCmd command generates a signing +// certificate and stores it in the cert directory under +// tenant-signing..crt and key under tenant-signing..key. +var mtCreateTenantSigningCertCmd = &cobra.Command{ + Use: "create-tenant-signing --certs-dir= ", + Short: "create tenant signing certificate and key", + Long: ` +Generate a tenant signing certificate "/tenant-signing..crt" and signing key +"/tenant-signing..key". + +If --overwrite is true, any existing files are overwritten. 
+`, + Args: cobra.ExactArgs(1), + RunE: clierrorplus.MaybeDecorateError( + func(cmd *cobra.Command, args []string) error { + tenantIDs := args[0] + tenantID, err := strconv.ParseUint(tenantIDs, 10, 64) + if err != nil { + return errors.Wrapf(err, "%s is invalid uint64", tenantIDs) + } + return errors.Wrap( + security.CreateTenantSigningPair( + certCtx.certsDir, + certCtx.certificateLifetime, + certCtx.overwriteFiles, + tenantID), + "failed to generate tenant signing cert and key") + }), +} diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index a8914f732563..f80353e7e6b8 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -223,7 +223,7 @@ const ( // This version comes with a migration to populate the same seed data // for existing tenants. SeedTenantSpanConfigs - // Public schema is backed by a descriptor. + // PublicSchemasWithDescriptors backs public schemas with descriptors. PublicSchemasWithDescriptors // AlterSystemProtectedTimestampAddColumn adds a target column to the // system.protected_ts_records table that describes what is protected by the diff --git a/pkg/cmd/cmp-sql/BUILD.bazel b/pkg/cmd/cmp-sql/BUILD.bazel index d6b0402d70c0..c27ffc7172e3 100644 --- a/pkg/cmd/cmp-sql/BUILD.bazel +++ b/pkg/cmd/cmp-sql/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:private"], deps = [ "//pkg/util/randutil", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_jackc_pgtype//:pgtype", "@com_github_jackc_pgx_v4//:pgx", ], diff --git a/pkg/cmd/cmp-sql/main.go b/pkg/cmd/cmp-sql/main.go index c457a7cc21f2..caf0c61fbb78 100644 --- a/pkg/cmd/cmp-sql/main.go +++ b/pkg/cmd/cmp-sql/main.go @@ -34,7 +34,7 @@ import ( "regexp" "strings" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/jackc/pgtype" "github.com/jackc/pgx/v4" diff --git a/pkg/cmd/cmpconn/BUILD.bazel b/pkg/cmd/cmpconn/BUILD.bazel index aa8e498fd01b..411dde20a5d1 100644 --- a/pkg/cmd/cmpconn/BUILD.bazel +++ b/pkg/cmd/cmpconn/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/sql/randgen", "//pkg/sql/sem/tree", "//pkg/util/duration", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_google_go_cmp//cmp", "@com_github_google_go_cmp//cmp/cmpopts", @@ -28,7 +28,7 @@ go_test( srcs = ["compare_test.go"], embed = [":cmpconn"], deps = [ - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_jackc_pgtype//:pgtype", ], ) diff --git a/pkg/cmd/cmpconn/compare.go b/pkg/cmd/cmpconn/compare.go index 07dbef3ab3b0..b82a82c33356 100644 --- a/pkg/cmd/cmpconn/compare.go +++ b/pkg/cmd/cmpconn/compare.go @@ -14,7 +14,7 @@ import ( "math/big" "strings" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/errors" @@ -118,7 +118,9 @@ var ( if t.NaN { v = &apd.Decimal{Form: apd.NaN} } else { - v = apd.NewWithBigInt(t.Int, t.Exp) + var coeff apd.BigInt + coeff.SetMathBigInt(t.Int) + v = apd.NewWithBigInt(&coeff, t.Exp) } } case int64: diff --git a/pkg/cmd/cmpconn/compare_test.go b/pkg/cmd/cmpconn/compare_test.go index 3acd85f99f78..24fd9fb0c96e 100644 --- a/pkg/cmd/cmpconn/compare_test.go +++ b/pkg/cmd/cmpconn/compare_test.go @@ -15,7 +15,7 @@ import ( 
"math/big" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/jackc/pgtype" ) diff --git a/pkg/cmd/dev/bench.go b/pkg/cmd/dev/bench.go index 352ffe517bed..e14e672869f3 100644 --- a/pkg/cmd/dev/bench.go +++ b/pkg/cmd/dev/bench.go @@ -19,6 +19,11 @@ import ( "github.com/spf13/cobra" ) +const ( + benchTimeFlag = "bench-time" + benchMemFlag = "bench-mem" +) + // makeBenchCmd constructs the subcommand used to run the specified benchmarks. func makeBenchCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Command { benchCmd := &cobra.Command{ @@ -26,20 +31,39 @@ func makeBenchCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Com Short: `Run the specified benchmarks`, Long: `Run the specified benchmarks.`, Example: ` - dev bench pkg/sql/parser --filter=BenchmarkParse`, + dev bench pkg/sql/parser --filter=BenchmarkParse + dev bench pkg/bench -f='BenchmarkTracing/1node/scan/trace=off' --count=2 --bench-time=10x --bench-mem`, Args: cobra.MinimumNArgs(0), RunE: runE, } addCommonBuildFlags(benchCmd) addCommonTestFlags(benchCmd) + + benchCmd.Flags().BoolP(vFlag, "v", false, "show benchmark process output") + benchCmd.Flags().BoolP(showLogsFlag, "", false, "show crdb logs in-line") + benchCmd.Flags().Int(countFlag, 1, "run benchmark n times") + // We use a string flag for benchtime instead of a duration; the go test + // runner accepts input of the form "Nx" to run the benchmark N times (see + // `go help testflag`). + benchCmd.Flags().String(benchTimeFlag, "", "duration to run each benchmark for") + benchCmd.Flags().Bool(benchMemFlag, false, "print memory allocations for benchmarks") + return benchCmd } -func (d *dev) bench(cmd *cobra.Command, pkgs []string) error { +func (d *dev) bench(cmd *cobra.Command, commandLine []string) error { + pkgs, additionalBazelArgs := splitArgsAtDash(cmd, commandLine) ctx := cmd.Context() - filter := mustGetFlagString(cmd, filterFlag) - timeout := mustGetFlagDuration(cmd, timeoutFlag) - short := mustGetFlagBool(cmd, shortFlag) + var ( + filter = mustGetFlagString(cmd, filterFlag) + timeout = mustGetFlagDuration(cmd, timeoutFlag) + short = mustGetFlagBool(cmd, shortFlag) + showLogs = mustGetFlagBool(cmd, showLogsFlag) + verbose = mustGetFlagBool(cmd, vFlag) + count = mustGetFlagInt(cmd, countFlag) + benchTime = mustGetFlagString(cmd, benchTimeFlag) + benchMem = mustGetFlagBool(cmd, benchMemFlag) + ) // Enumerate all benches to run. if len(pkgs) == 0 { @@ -87,13 +111,30 @@ func (d *dev) bench(cmd *cobra.Command, pkgs []string) error { if numCPUs != 0 { argsBase = append(argsBase, fmt.Sprintf("--local_cpu_resources=%d", numCPUs)) } + if verbose { + argsBase = append(argsBase, "--test_arg", "-test.v") + } + if showLogs { + argsBase = append(argsBase, "--test_arg", "-show-logs") + } + if count != 1 { + argsBase = append(argsBase, "--test_arg", fmt.Sprintf("-test.count=%d", count)) + } + if benchTime != "" { + argsBase = append(argsBase, "--test_arg", fmt.Sprintf("-test.benchtime=%s", benchTime)) + } + if benchMem { + argsBase = append(argsBase, "--test_arg", "-test.benchmem") + } for _, bench := range benches { args := make([]string, len(argsBase)) copy(args, argsBase) base := filepath.Base(bench) target := "//" + bench + ":" + base + "_test" - args = append(args, target, "--", "-test.run=-") + args = append(args, target) + args = append(args, additionalBazelArgs...) 
+ args = append(args, "--", "-test.run=-") if filter == "" { args = append(args, "-test.bench=.") } else { diff --git a/pkg/cmd/dev/build.go b/pkg/cmd/dev/build.go index 79b5334d441b..3238126886bb 100644 --- a/pkg/cmd/dev/build.go +++ b/pkg/cmd/dev/build.go @@ -12,6 +12,7 @@ package main import ( "context" + "encoding/json" "errors" "fmt" "log" @@ -36,6 +37,26 @@ type buildTarget struct { isGoBinary bool } +type bazelAqueryOutput struct { + Artifacts []bazelAqueryArtifact + PathFragments []bazelAqueryPathFragment + Configuration []bazelAqueryConfiguration +} + +type bazelAqueryArtifact struct { + PathFragmentID int +} + +type bazelAqueryConfiguration struct { + Mnemonic string +} + +type bazelAqueryPathFragment struct { + ID int + Label string + ParentID int `json:",omitempty"` +} + // makeBuildCmd constructs the subcommand used to build the specified binaries. func makeBuildCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Command { buildCmd := &cobra.Command{ @@ -66,6 +87,7 @@ func makeBuildCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Com // TODO(irfansharif): Make sure all the relevant binary targets are defined // above, and in usage docs. +// buildTargetMapping maintains shorthands that map 1:1 with bazel targets. var buildTargetMapping = map[string]string{ "buildifier": "@com_github_bazelbuild_buildtools//buildifier:buildifier", "buildozer": "@com_github_bazelbuild_buildtools//buildozer:buildozer", @@ -109,7 +131,7 @@ func (d *dev) build(cmd *cobra.Command, commandLine []string) error { if err := d.exec.CommandContextInheritingStdStreams(ctx, "bazel", args...); err != nil { return err } - return d.stageArtifacts(ctx, buildTargets, skipGenerate) + return d.stageArtifacts(ctx, buildTargets) } // Cross-compilation case. for _, target := range buildTargets { @@ -148,7 +170,7 @@ func (d *dev) build(cmd *cobra.Command, commandLine []string) error { return nil } -func (d *dev) stageArtifacts(ctx context.Context, targets []buildTarget, skipGenerate bool) error { +func (d *dev) stageArtifacts(ctx context.Context, targets []buildTarget) error { workspace, err := d.getWorkspace(ctx) if err != nil { return err @@ -208,7 +230,13 @@ func (d *dev) stageArtifacts(ctx context.Context, targets []buildTarget, skipGen logSuccessfulBuild(target.fullName, rel) } - if !skipGenerate { + shouldHoist := false + for _, target := range targets { + if target.fullName == "//:go_path" { + shouldHoist = true + } + } + if shouldHoist { if err := d.hoistGeneratedCode(ctx, workspace, bazelBin); err != nil { return err } @@ -234,7 +262,7 @@ func targetToBinBasename(target string) string { // (e.g. after translation, so short -> "//pkg/cmd/cockroach-short"). func (d *dev) getBasicBuildArgs( ctx context.Context, targets []string, skipGenerate bool, -) (args []string, buildTargets []buildTarget, err error) { +) (args []string, buildTargets []buildTarget, _ error) { if len(targets) == 0 { // Default to building the cockroach binary. targets = append(targets, "cockroach") @@ -256,8 +284,8 @@ func (d *dev) getBasicBuildArgs( queryArgs := []string{"query", target, "--output=label_kind"} labelKind, queryErr := d.exec.CommandContextSilent(ctx, "bazel", queryArgs...) 
if queryErr != nil { - err = fmt.Errorf("could not run `bazel %s` (%w)", shellescape.QuoteCommand(queryArgs), queryErr) - return + return nil, nil, fmt.Errorf("could not run `bazel %s` (%w)", + shellescape.QuoteCommand(queryArgs), queryErr) } for _, line := range strings.Split(strings.TrimSpace(string(labelKind)), "\n") { fields := strings.Fields(line) @@ -278,19 +306,15 @@ func (d *dev) getBasicBuildArgs( } continue } + aliased, ok := buildTargetMapping[target] if !ok { - err = fmt.Errorf("unrecognized target: %s", target) - return + return nil, nil, fmt.Errorf("unrecognized target: %s", target) } args = append(args, aliased) buildTargets = append(buildTargets, buildTarget{fullName: aliased, isGoBinary: true}) } - // If we're hoisting generated code, we also want to build //:go_path. - if !skipGenerate { - args = append(args, "//:go_path") - } // Add --config=with_ui iff we're building a target that needs it. for _, target := range buildTargets { @@ -299,11 +323,26 @@ func (d *dev) getBasicBuildArgs( break } } + shouldSkipGenerate := true + for _, target := range buildTargets { + if strings.Contains(target.fullName, "//pkg/cmd/cockroach") { + shouldSkipGenerate = false + break + } + } + if shouldSkipGenerate { + skipGenerate = true + } + // If we're hoisting generated code, we also want to build //:go_path. + if !skipGenerate { + args = append(args, "//:go_path") + buildTargets = append(buildTargets, buildTarget{fullName: "//:go_path"}) + } if shouldBuildWithTestConfig { args = append(args, "--config=test") } - return + return args, buildTargets, nil } // Hoist generated code out of the sandbox and into the workspace. @@ -331,13 +370,43 @@ func (d *dev) hoistGeneratedCode(ctx context.Context, workspace string, bazelBin return err } } - // Enumerate generated .go files in the sandbox so we can hoist - // them out. - cockroachDir := filepath.Join(bazelBin, "go_path", "src", "github.com", "cockroachdb", "cockroach") - goFiles, err := d.os.ListFilesWithSuffix(cockroachDir, ".go") + // List the files in the built go_path. We do this by running + // `bazel aquery` and parsing the results. We should be able to just + // list all the .go files in the go_path and copy them directly, but + // https://github.com/bazelbuild/rules_go/issues/3041 is in the way. + jsonBlob, err := d.exec.CommandContextSilent(ctx, "bazel", "aquery", "--output=jsonproto", "//:go_path") if err != nil { return err } + var aqueryOutput bazelAqueryOutput + if err := json.Unmarshal(jsonBlob, &aqueryOutput); err != nil { + return err + } + pathFragmentMap := make(map[int]bazelAqueryPathFragment) + for _, pathFragment := range aqueryOutput.PathFragments { + pathFragmentMap[pathFragment.ID] = pathFragment + } + var outputFiles []string + for _, artifact := range aqueryOutput.Artifacts { + fragment := pathFragmentMap[artifact.PathFragmentID] + var fullPath string + for { + fullPath = filepath.Join(fragment.Label, fullPath) + if fragment.ParentID == 0 { + break + } + fragment = pathFragmentMap[fragment.ParentID] + } + outputFiles = append(outputFiles, fullPath) + } + if len(aqueryOutput.Configuration) != 1 { + return fmt.Errorf("expected exactly one configuration in `bazel aquery` output; got %v", aqueryOutput.Configuration) + } + mnemonic := aqueryOutput.Configuration[0].Mnemonic + bazelBinPrefix := filepath.Join("bazel-out", mnemonic, "bin") + generatedPrefix := filepath.Join(bazelBinPrefix, "go_path", "src", "github.com", "cockroachdb", "cockroach") + + // Parse out the list of checked-in generated files. 
fileContents, err := d.os.ReadFile(filepath.Join(workspace, "build/bazelutil/checked_in_genfiles.txt")) if err != nil { return err @@ -356,19 +425,24 @@ func (d *dev) hoistGeneratedCode(ctx context.Context, workspace string, bazelBin renameMap[filepath.Join(dir, oldBasename)] = filepath.Join(dir, newBasename) } - for _, file := range goFiles { + for _, file := range outputFiles { + if !strings.HasPrefix(file, generatedPrefix+"/") { + continue + } + relativeToBazelBin := strings.TrimPrefix(file, bazelBinPrefix+"/") + absPath := filepath.Join(bazelBin, relativeToBazelBin) + relPath := strings.TrimPrefix(file, generatedPrefix+"/") // First case: generated Go code that's checked into tree. - relPath := strings.TrimPrefix(file, cockroachDir+"/") dst, ok := renameMap[relPath] if ok { - err := d.os.CopyFile(file, filepath.Join(workspace, dst)) + err := d.os.CopyFile(absPath, filepath.Join(workspace, dst)) if err != nil { return err } continue } // Otherwise, just copy the file to the same place in the workspace. - err := d.os.CopyFile(file, filepath.Join(workspace, relPath)) + err := d.os.CopyFile(absPath, filepath.Join(workspace, relPath)) if err != nil { return err } diff --git a/pkg/cmd/dev/test.go b/pkg/cmd/dev/test.go index a7353ecf2080..f4fdd6adcb03 100644 --- a/pkg/cmd/dev/test.go +++ b/pkg/cmd/dev/test.go @@ -27,6 +27,7 @@ const ( stressTarget = "@com_github_cockroachdb_stress//:stress" // General testing flags. + countFlag = "count" vFlag = "verbose" showLogsFlag = "show-logs" stressFlag = "stress" @@ -35,6 +36,7 @@ const ( ignoreCacheFlag = "ignore-cache" rewriteFlag = "rewrite" rewriteArgFlag = "rewrite-arg" + vModuleFlag = "vmodule" ) func makeTestCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Command { @@ -46,6 +48,7 @@ func makeTestCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Comm Example: ` dev test dev test pkg/kv/kvserver --filter=TestReplicaGC* -v --timeout=1m + dev test pkg/server -f=TestSpanStatsResponse -v --count=5 --vmodule='raft=1' dev test --stress --race ...`, Args: cobra.MinimumNArgs(0), RunE: runE, @@ -63,14 +66,16 @@ func makeTestCmd(runE func(cmd *cobra.Command, args []string) error) *cobra.Comm // under test, controlling whether the process-internal logs are made // visible. testCmd.Flags().BoolP(vFlag, "v", false, "show testing process output") + testCmd.Flags().Int(countFlag, 1, "run test the given number of times") testCmd.Flags().BoolP(showLogsFlag, "", false, "show crdb logs in-line") testCmd.Flags().Bool(stressFlag, false, "run tests under stress") testCmd.Flags().String(stressArgsFlag, "", "additional arguments to pass to stress") testCmd.Flags().Bool(raceFlag, false, "run tests using race builds") testCmd.Flags().Bool(ignoreCacheFlag, false, "ignore cached test runs") - testCmd.Flags().String(rewriteFlag, "", "argument to pass to underlying (only applicable for certain tests, e.g. logic and datadriven tests). 
If unspecified, -rewrite will be passed to the test binary.") + testCmd.Flags().String(rewriteFlag, "", "argument to pass to underlying test binary (only applicable to certain tests)") testCmd.Flags().String(rewriteArgFlag, "", "additional argument to pass to -rewrite (implies --rewrite)") testCmd.Flags().Lookup(rewriteFlag).NoOptDefVal = "-rewrite" + testCmd.Flags().String(vModuleFlag, "", "comma-separated list of pattern=N settings for file-filtered logging") return testCmd } @@ -89,6 +94,8 @@ func (d *dev) test(cmd *cobra.Command, commandLine []string) error { timeout = mustGetFlagDuration(cmd, timeoutFlag) verbose = mustGetFlagBool(cmd, vFlag) showLogs = mustGetFlagBool(cmd, showLogsFlag) + count = mustGetFlagInt(cmd, countFlag) + vModule = mustGetFlagString(cmd, vModuleFlag) ) var args []string @@ -227,6 +234,15 @@ func (d *dev) test(cmd *cobra.Command, commandLine []string) error { if showLogs { args = append(args, "--test_arg", "-show-logs") } + if count != 1 { + args = append(args, "--test_arg", fmt.Sprintf("-test.count=%d", count)) + } + if vModule != "" { + args = append(args, "--test_arg", fmt.Sprintf("-vmodule=%s", vModule)) + } + // TODO(irfansharif): Support --go-flags, to pass in an arbitrary set of + // flags into the go test binaries. Gives better coverage to everything + // listed under `go help testflags`. { // Handle test output flags. testOutputArgs := []string{"--test_output", "errors"} diff --git a/pkg/cmd/dev/testdata/build.txt b/pkg/cmd/dev/testdata/build.txt index 3f0c9f6bac0e..1adbfca400a9 100644 --- a/pkg/cmd/dev/testdata/build.txt +++ b/pkg/cmd/dev/testdata/build.txt @@ -44,7 +44,7 @@ rm go/src/github.com/cockroachdb/cockroach/cockroach-short ln -s /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/pkg/cmd/cockroach-short/cockroach-short_/cockroach-short go/src/github.com/cockroachdb/cockroach/cockroach-short git status --ignored --short go/src/github.com/cockroachdb/cockroach/pkg rm pkg/file_to_delete.go -find /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach -name *.go +bazel aquery --output=jsonproto //:go_path cat go/src/github.com/cockroachdb/cockroach/build/bazelutil/checked_in_genfiles.txt cp /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go go/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go cp /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated-gen.go go/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated.go diff --git a/pkg/cmd/dev/testdata/generate.txt b/pkg/cmd/dev/testdata/generate.txt index 30c6d4655507..f5b2a5bd0820 100644 --- a/pkg/cmd/dev/testdata/generate.txt +++ b/pkg/cmd/dev/testdata/generate.txt @@ -26,7 +26,7 @@ bazel info workspace --color=no bazel info bazel-bin --color=no git status --ignored --short go/src/github.com/cockroachdb/cockroach/pkg rm pkg/file_to_delete.go -find /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach -name *.go +bazel aquery --output=jsonproto //:go_path cat go/src/github.com/cockroachdb/cockroach/build/bazelutil/checked_in_genfiles.txt cp 
/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go go/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go cp /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated-gen.go go/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated.go diff --git a/pkg/cmd/dev/testdata/recording/build.txt b/pkg/cmd/dev/testdata/recording/build.txt index e78a025980c3..48a7b0a5b3eb 100644 --- a/pkg/cmd/dev/testdata/recording/build.txt +++ b/pkg/cmd/dev/testdata/recording/build.txt @@ -112,13 +112,109 @@ git status --ignored --short go/src/github.com/cockroachdb/cockroach/pkg rm pkg/file_to_delete.go ---- -find /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach -name *.go ----- ----- -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated-gen.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang/expr-gen.og.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang/operator-gen.og.go +bazel aquery --output=jsonproto //:go_path +---- +---- +{ + "artifacts": [{ + "id": 1, + "pathFragmentId": 1 + }, { + "id": 2, + "pathFragmentId": 13 + }, { + "id": 3, + "pathFragmentId": 15 + }, { + "id": 4, + "pathFragmentId": 20 + }], + "configuration": [{ + "id": 1, + "mnemonic": "darwin-fastbuild", + "platformName": "darwin" + }], + "pathFragments": [{ + "id": 1, + "label": "storage_services.pb.go", + "parentId": 2 + }, { + "id": 2, + "label": "kvserver", + "parentId": 3 + }, { + "id": 3, + "label": "kv", + "parentId": 4 + }, { + "id": 4, + "label": "pkg", + "parentId": 5 + }, { + "id": 5, + "label": "cockroach", + "parentId": 6 + }, { + "id": 6, + "label": "cockroachdb", + "parentId": 7 + }, { + "id": 7, + "label": "github.com", + "parentId": 8 + }, { + "id": 8, + "label": "src", + "parentId": 9 + }, { + "id": 9, + "label": "go_path", + "parentId": 10 + }, { + "id": 10, + "label": "bin", + "parentId": 11 + }, { + "id": 11, + "label": "darwin-fastbuild", + "parentId": 12 + }, { + "id": 12, + "label": "bazel-out" + }, { + "id": 13, + "label": "batch_generated-gen.go", + "parentId": 14 + }, { + "id": 14, + "label": "roachpb", + "parentId": 4 + }, { + "id": 15, + "label": "expr-gen.og.go", + "parentId": 16 + }, { + "id": 16, + "label": "lang", + "parentId": 17 + }, { + "id": 17, + "label": "optgen", + "parentId": 18 + }, { + "id": 18, + "label": "opt", + "parentId": 19 + }, { + "id": 19, + "label": "sql", + "parentId": 4 + }, { + "id": 20, + "label": "operator-gen.og.go", + "parentId": 16 + }] +} ---- ---- diff --git a/pkg/cmd/dev/testdata/recording/generate.txt b/pkg/cmd/dev/testdata/recording/generate.txt index f63082a250e3..24a906fbedee 100644 --- 
a/pkg/cmd/dev/testdata/recording/generate.txt +++ b/pkg/cmd/dev/testdata/recording/generate.txt @@ -128,13 +128,109 @@ git status --ignored --short go/src/github.com/cockroachdb/cockroach/pkg rm pkg/file_to_delete.go ---- -find /private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach -name *.go ----- ----- -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/kv/kvserver/storage_services.pb.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/roachpb/batch_generated-gen.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang/expr-gen.og.go -/private/var/tmp/_bazel/99e666e4e674209ecdb66b46371278df/execroot/cockroach/bazel-out/darwin-fastbuild/bin/go_path/src/github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang/operator-gen.og.go +bazel aquery --output=jsonproto //:go_path +---- +---- +{ + "artifacts": [{ + "id": 1, + "pathFragmentId": 1 + }, { + "id": 2, + "pathFragmentId": 13 + }, { + "id": 3, + "pathFragmentId": 15 + }, { + "id": 4, + "pathFragmentId": 20 + }], + "configuration": [{ + "id": 1, + "mnemonic": "darwin-fastbuild", + "platformName": "darwin" + }], + "pathFragments": [{ + "id": 1, + "label": "storage_services.pb.go", + "parentId": 2 + }, { + "id": 2, + "label": "kvserver", + "parentId": 3 + }, { + "id": 3, + "label": "kv", + "parentId": 4 + }, { + "id": 4, + "label": "pkg", + "parentId": 5 + }, { + "id": 5, + "label": "cockroach", + "parentId": 6 + }, { + "id": 6, + "label": "cockroachdb", + "parentId": 7 + }, { + "id": 7, + "label": "github.com", + "parentId": 8 + }, { + "id": 8, + "label": "src", + "parentId": 9 + }, { + "id": 9, + "label": "go_path", + "parentId": 10 + }, { + "id": 10, + "label": "bin", + "parentId": 11 + }, { + "id": 11, + "label": "darwin-fastbuild", + "parentId": 12 + }, { + "id": 12, + "label": "bazel-out" + }, { + "id": 13, + "label": "batch_generated-gen.go", + "parentId": 14 + }, { + "id": 14, + "label": "roachpb", + "parentId": 4 + }, { + "id": 15, + "label": "expr-gen.og.go", + "parentId": 16 + }, { + "id": 16, + "label": "lang", + "parentId": 17 + }, { + "id": 17, + "label": "optgen", + "parentId": 18 + }, { + "id": 18, + "label": "opt", + "parentId": 19 + }, { + "id": 19, + "label": "sql", + "parentId": 4 + }, { + "id": 20, + "label": "operator-gen.og.go", + "parentId": 16 + }] +} ---- ---- diff --git a/pkg/cmd/dev/util.go b/pkg/cmd/dev/util.go index c5f19fa5df12..98d8a6548e99 100644 --- a/pkg/cmd/dev/util.go +++ b/pkg/cmd/dev/util.go @@ -57,6 +57,14 @@ func mustGetFlagBool(cmd *cobra.Command, name string) bool { return val } +func mustGetFlagInt(cmd *cobra.Command, name string) int { + val, err := cmd.Flags().GetInt(name) + if err != nil { + log.Fatalf("unexpected error: %v", err) + } + return val +} + func mustGetFlagDuration(cmd *cobra.Command, name string) time.Duration { val, err := cmd.Flags().GetDuration(name) if err != nil { diff --git a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go index ee7abb943655..105382d20f80 100644 --- a/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go +++ b/pkg/cmd/roachtest/tests/pgjdbc_blocklist.go @@ -21,132 +21,80 @@ var pgjdbcBlocklists = 
blocklistsForVersion{ // After a failed run, an updated version of this blocklist should be available // in the test log. var pgjdbcBlockList22_1 = blocklist{ - "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", - "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", - "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", - "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = REGULAR]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = FORCE]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = REGULAR]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = FORCE]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = REGULAR]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = FORCE]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = REGULAR]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = FORCE]": "21286", - "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = REGULAR]": "21286", - "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = FORCE]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = REGULAR]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = FORCE]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = REGULAR]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = FORCE]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = REGULAR]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = FORCE]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = REGULAR]": "41405", - "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = FORCE]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = REGULAR]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = FORCE]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = REGULAR]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = FORCE]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = REGULAR]": "26925", - "org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = FORCE]": "unknown", - "org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = REGULAR]": "unknown", - "org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = FORCE]": "unknown", - "org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = REGULAR]": "unknown", - "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = FORCE]": "32552", - "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = REGULAR]": "32552", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]": "41513", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]": "41513", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]": "41513", - 
"org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = true]": "41513", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = false]": "26366", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = true]": "26366", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = false]": "26366", - "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = true]": "26366", - "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = false]": "31463", - "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "31463", - "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "31463", - "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "31463", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = false]": "44803", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = FORCE, insertRewrite = true]": "44803", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = false]": "44803", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatchThrowsAutoCommit[binary = REGULAR, insertRewrite = true]": "44803", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = false]": "40195", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = true]": "40195", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = false]": "40195", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = true]": "40195", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = FORCE, insertRewrite = false]": "44803", - "org.postgresql.test.jdbc2.BatchExecuteTest.testSmallBatchUpdateFailureSimple[binary = REGULAR, insertRewrite = false]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[105: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[107: batchTest(mode=FAIL_VIA_DUP_KEY, position=SECOND_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[112: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[113: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[114: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[115: batchTest(mode=FAIL_VIA_DUP_KEY, position=MIDDLE, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", 
- "org.postgresql.test.jdbc2.BatchFailureTest.run[120: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[121: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[122: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[123: batchTest(mode=FAIL_VIA_DUP_KEY, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[128: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[129: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[130: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[131: batchTest(mode=FAIL_VIA_DUP_KEY, position=LAST_ROW, autoCommit=YES, batchType=PREPARED, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[16: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[17: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[18: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[19: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[24: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[25: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[26: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[27: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[32: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, 
binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[33: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[34: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[35: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[40: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[41: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[42: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[43: batchTest(mode=FAIL_VIA_SELECT_PARSE, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[56: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[57: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[58: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[59: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=SECOND_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[64: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[65: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[66: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[67: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=MIDDLE, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[72: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[73: 
batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[74: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[75: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=ALMOST_LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[80: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[81: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=REGULAR, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[82: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=true)]": "44803", - "org.postgresql.test.jdbc2.BatchFailureTest.run[83: batchTest(mode=FAIL_VIA_SELECT_RUNTIME, position=LAST_ROW, autoCommit=YES, batchType=SIMPLE, generateKeys=YES, binary=FORCE, insertRewrite=false)]": "44803", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[2: autoCommit=NO, 
binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[2: autoCommit=NO, binary=REGULAR]": "26508", - "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[3: autoCommit=NO, binary=FORCE]": "26508", - "org.postgresql.test.jdbc2.BlobTest.testGetBytesOffset": "26725", - "org.postgresql.test.jdbc2.BlobTest.testLargeLargeObject": "26725", - "org.postgresql.test.jdbc2.BlobTest.testMarkResetStream": "26725", - "org.postgresql.test.jdbc2.BlobTest.testMultipleStreams": "26725", - "org.postgresql.test.jdbc2.BlobTest.testParallelStreams": "26725", + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508", + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508", + "org.postgresql.jdbc.DeepBatchedInsertStatementTest.testVaryingTypeCounts": "26508", + "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testEscaping[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testIndexAccess[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testMultiDimensionalArray[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardBounds[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = FORCE]": "21286", + "org.postgresql.test.jdbc2.ArrayTest.testNonStandardDelimiter[binary = REGULAR]": "21286", + "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testNullValues[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testRecursiveResultSets[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveArrays[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = FORCE]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testRetrieveResultSets[binary = REGULAR]": "41405", + "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetArray[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveArraysObjects[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = FORCE]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testSetPrimitiveObjects[binary = REGULAR]": "26925", + "org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = FORCE]": "unknown", + 
"org.postgresql.test.jdbc2.ArrayTest.testStringEscaping[binary = REGULAR]": "unknown", + "org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = FORCE]": "unknown", + "org.postgresql.test.jdbc2.ArrayTest.testUnknownArrayType[binary = REGULAR]": "unknown", + "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = FORCE]": "32552", + "org.postgresql.test.jdbc2.ArrayTest.testWriteMultiDimensional[binary = REGULAR]": "32552", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = false]": "41513", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = FORCE, insertRewrite = true]": "41513", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = false]": "41513", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes[binary = REGULAR, insertRewrite = true]": "41513", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = false]": "26366", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = FORCE, insertRewrite = true]": "26366", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = false]": "26366", + "org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithEmbeddedNulls[binary = REGULAR, insertRewrite = true]": "26366", + "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = false]": "31463", + "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = FORCE, insertRewrite = true]": "31463", + "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = false]": "31463", + "org.postgresql.test.jdbc2.BatchExecuteTest.testMixedBatch[binary = REGULAR, insertRewrite = true]": "31463", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = false]": "40195", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = FORCE, insertRewrite = true]": "40195", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = false]": "40195", + "org.postgresql.test.jdbc2.BatchExecuteTest.testSelectInBatch[binary = REGULAR, insertRewrite = true]": "40195", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test17000Binds[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.test32000Binds[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParameter[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenBatchStatementWithFixedParametersOnly[3: autoCommit=NO, binary=FORCE]": "26508", + 
"org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testBindsInNestedParens[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testConsistentOutcome[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testINSERTwithNamedColumnsNotBroken[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMixedCaseInSeRtStatement[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[2: autoCommit=NO, binary=REGULAR]": "26508", + "org.postgresql.test.jdbc2.BatchedInsertReWriteEnabledTest.testMultiValues1bind[3: autoCommit=NO, binary=FORCE]": "26508", + "org.postgresql.test.jdbc2.BlobTest.testGetBytesOffset": "26725", + "org.postgresql.test.jdbc2.BlobTest.testLargeLargeObject": "26725", + "org.postgresql.test.jdbc2.BlobTest.testMarkResetStream": "26725", + "org.postgresql.test.jdbc2.BlobTest.testMultipleStreams": "26725", + "org.postgresql.test.jdbc2.BlobTest.testParallelStreams": "26725", "org.postgresql.test.jdbc2.BlobTest.testSet": "26725", "org.postgresql.test.jdbc2.BlobTest.testSetNull": "26725", "org.postgresql.test.jdbc2.BlobTest.testUploadBlob_LOOP": "26725", diff --git a/pkg/cmd/roachtest/tests/predecessor_version.go b/pkg/cmd/roachtest/tests/predecessor_version.go index 5f6bf6798c28..b097540d0a44 100644 --- a/pkg/cmd/roachtest/tests/predecessor_version.go +++ b/pkg/cmd/roachtest/tests/predecessor_version.go @@ -36,7 +36,7 @@ func PredecessorVersion(buildVersion version.Version) (string, error) { // checkpoint option enabled to create the missing store directory // fixture (see runVersionUpgrade). verMap := map[string]string{ - "22.1": "21.2.3", + "22.1": "21.2.4", "21.2": "21.1.12", "21.1": "20.2.12", "20.2": "20.1.16", diff --git a/pkg/cmd/roachtest/tests/sequelize.go b/pkg/cmd/roachtest/tests/sequelize.go index 5886033e199d..fc1eeb37bdd1 100644 --- a/pkg/cmd/roachtest/tests/sequelize.go +++ b/pkg/cmd/roachtest/tests/sequelize.go @@ -23,7 +23,7 @@ import ( ) var sequelizeCockroachDBReleaseTagRegex = regexp.MustCompile(`^v(?P\d+)\.(?P\d+)\.(?P\d+)$`) -var supportedSequelizeCockroachDBRelease = "v6.0.3" +var supportedSequelizeCockroachDBRelease = "v6.0.5" // This test runs sequelize's full test suite against a single cockroach node. 
diff --git a/pkg/cmd/roachtest/tests/tlp.go b/pkg/cmd/roachtest/tests/tlp.go index 5484443feae3..1fbf931d74b7 100644 --- a/pkg/cmd/roachtest/tests/tlp.go +++ b/pkg/cmd/roachtest/tests/tlp.go @@ -49,7 +49,7 @@ func runTLP(ctx context.Context, t test.Test, c cluster.Cluster) { timeout := 10 * time.Minute // Run 10 minute iterations of TLP in a loop for about the entire test, // giving 5 minutes at the end to allow the test to shut down cleanly. - until := time.After(t.Spec().(registry.TestSpec).Timeout - 5*time.Minute) + until := time.After(t.Spec().(*registry.TestSpec).Timeout - 5*time.Minute) done := ctx.Done() c.Put(ctx, t.Cockroach(), "./cockroach") diff --git a/pkg/cmd/teamcity-trigger/main.go b/pkg/cmd/teamcity-trigger/main.go index a261e2e9856c..8e59b03a67a1 100644 --- a/pkg/cmd/teamcity-trigger/main.go +++ b/pkg/cmd/teamcity-trigger/main.go @@ -106,11 +106,11 @@ func runTC(queueBuild func(string, map[string]string)) { } else { opts["env.COCKROACH_KVNEMESIS_STEPS"] = "10000" } - case baseImportPath + "sql/logictest": - // Stress logic tests with reduced parallelism (to avoid overloading the + case baseImportPath + "sql/logictest", baseImportPath + "kv/kvserver": + // Stress heavy with reduced parallelism (to avoid overloading the // machine, see https://github.com/cockroachdb/cockroach/pull/10966). parallelism /= 2 - // Increase logic test timeout. + // Increase test timeout to compensate. testTimeout = 2 * time.Hour maxTime = 3 * time.Hour } diff --git a/pkg/col/coldata/BUILD.bazel b/pkg/col/coldata/BUILD.bazel index f8fe780e140b..f1c330f5f9a4 100644 --- a/pkg/col/coldata/BUILD.bazel +++ b/pkg/col/coldata/BUILD.bazel @@ -25,7 +25,7 @@ go_library( "//pkg/util/duration", "//pkg/util/json", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], @@ -43,6 +43,7 @@ go_test( "vec_test.go", ], embed = [":coldata"], + tags = ["no-remote"], deps = [ "//pkg/col/coldatatestutils", "//pkg/sql/colconv", diff --git a/pkg/col/coldata/native_types.go b/pkg/col/coldata/native_types.go index 02a901d062ec..708ab44d3825 100644 --- a/pkg/col/coldata/native_types.go +++ b/pkg/col/coldata/native_types.go @@ -13,7 +13,7 @@ package coldata import ( "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/util/duration" ) @@ -106,8 +106,6 @@ func (c Float64s) Set(idx int, val float64) { c[idx] = val } // Note that this method is usually inlined, but it isn't in case of the merge // joiner generated code (probably because of the size of the functions), so we // don't assert the inlining with the GCAssert linter. -// TODO(yuzefovich): consider whether Get and Set on Decimals should operate on -// pointers to apd.Decimal. func (c Decimals) Set(idx int, val apd.Decimal) { c[idx].Set(&val) } // Set sets the element at index idx of the vector to val. diff --git a/pkg/col/coldata/nulls.go b/pkg/col/coldata/nulls.go index c116396f695b..6d686b0ca880 100644 --- a/pkg/col/coldata/nulls.go +++ b/pkg/col/coldata/nulls.go @@ -340,12 +340,12 @@ func (n *Nulls) SetNullBitmap(bm []byte, size int) { // Or returns a new Nulls vector where NullAt(i) iff n1.NullAt(i) or // n2.NullAt(i). -func (n *Nulls) Or(n2 *Nulls) *Nulls { +func (n Nulls) Or(n2 Nulls) Nulls { // For simplicity, enforce that len(n.nulls) <= len(n2.nulls). 
if len(n.nulls) > len(n2.nulls) { n, n2 = n2, n } - res := &Nulls{ + res := Nulls{ maybeHasNulls: n.maybeHasNulls || n2.maybeHasNulls, nulls: make([]byte, len(n2.nulls)), } diff --git a/pkg/col/coldata/nulls_test.go b/pkg/col/coldata/nulls_test.go index 7c4a93e5b758..bc7c12b00328 100644 --- a/pkg/col/coldata/nulls_test.go +++ b/pkg/col/coldata/nulls_test.go @@ -161,7 +161,7 @@ func TestNullsSet(t *testing.T) { } for _, withSel := range []bool{false, true} { t.Run(fmt.Sprintf("WithSel=%t", withSel), func(t *testing.T) { - var srcNulls *Nulls + var srcNulls Nulls if withSel { args.Sel = make([]int, BatchSize()) // Make a selection vector with every even index. (This turns nulls10 into @@ -169,10 +169,10 @@ func TestNullsSet(t *testing.T) { for i := range args.Sel { args.Sel[i] = i * 2 } - srcNulls = &nulls10 + srcNulls = nulls10 } else { args.Sel = nil - srcNulls = &nulls5 + srcNulls = nulls5 } for _, destStartIdx := range pos { for _, srcStartIdx := range pos { @@ -241,7 +241,7 @@ func TestNullsOr(t *testing.T) { n1Choice, n2Choice := rng.Intn(len(nullsToChooseFrom)), rng.Intn(len(nullsToChooseFrom)) n1 := nullsToChooseFrom[n1Choice].Slice(0, length1) n2 := nullsToChooseFrom[n2Choice].Slice(0, length2) - or := n1.Or(&n2) + or := n1.Or(n2) require.Equal(t, or.maybeHasNulls, n1.maybeHasNulls || n2.maybeHasNulls) maxLength := length1 if length2 > length1 { diff --git a/pkg/col/coldata/vec.eg.go b/pkg/col/coldata/vec.eg.go index 36b3b3be4655..36c9c3b887b5 100644 --- a/pkg/col/coldata/vec.eg.go +++ b/pkg/col/coldata/vec.eg.go @@ -13,7 +13,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/types" diff --git a/pkg/col/coldata/vec.go b/pkg/col/coldata/vec.go index 4e468cacb108..d8f025cdf46b 100644 --- a/pkg/col/coldata/vec.go +++ b/pkg/col/coldata/vec.go @@ -128,7 +128,7 @@ type Vec interface { Nulls() *Nulls // SetNulls sets the nulls vector for this column. - SetNulls(*Nulls) + SetNulls(Nulls) // Length returns the length of the slice that is underlying this Vec. 
Length() int @@ -294,8 +294,8 @@ func (m *memColumn) Nulls() *Nulls { return &m.nulls } -func (m *memColumn) SetNulls(n *Nulls) { - m.nulls = *n +func (m *memColumn) SetNulls(n Nulls) { + m.nulls = n } func (m *memColumn) Length() int { diff --git a/pkg/col/coldata/vec_tmpl.go b/pkg/col/coldata/vec_tmpl.go index 4a8cdff84052..40f9d939f951 100644 --- a/pkg/col/coldata/vec_tmpl.go +++ b/pkg/col/coldata/vec_tmpl.go @@ -24,7 +24,7 @@ package coldata import ( "fmt" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" diff --git a/pkg/col/coldataext/BUILD.bazel b/pkg/col/coldataext/BUILD.bazel index 00557871173f..561256a51fd3 100644 --- a/pkg/col/coldataext/BUILD.bazel +++ b/pkg/col/coldataext/BUILD.bazel @@ -11,13 +11,12 @@ go_library( deps = [ "//pkg/col/coldata", "//pkg/col/typeconv", - "//pkg/sql/catalog/descpb", "//pkg/sql/colexecerror", "//pkg/sql/memsize", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util/encoding", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/col/coldataext/datum_vec.go b/pkg/col/coldataext/datum_vec.go index f4fc5fe1033a..aaca8a057236 100644 --- a/pkg/col/coldataext/datum_vec.go +++ b/pkg/col/coldataext/datum_vec.go @@ -14,13 +14,12 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/memsize" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) @@ -34,7 +33,7 @@ type datumVec struct { evalCtx *tree.EvalContext scratch []byte - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ coldata.DatumVec = &datumVec{} @@ -59,7 +58,7 @@ func CompareDatum(d, dVec, other interface{}) int { } // Hash returns the hash of the datum as a byte slice. -func Hash(d tree.Datum, da *rowenc.DatumAlloc) []byte { +func Hash(d tree.Datum, da *tree.DatumAlloc) []byte { ed := rowenc.EncDatum{Datum: convertToDatum(d)} // We know that we have tree.Datum, so there will definitely be no need to // decode ed for fingerprinting, so we pass in nil memory account. @@ -134,15 +133,15 @@ func (dv *datumVec) Cap() int { // MarshalAt implements coldata.DatumVec interface. func (dv *datumVec) MarshalAt(appendTo []byte, i int) ([]byte, error) { dv.maybeSetDNull(i) - return rowenc.EncodeTableValue( - appendTo, descpb.ColumnID(encoding.NoColumnID), dv.data[i], dv.scratch, + return valueside.Encode( + appendTo, valueside.NoColumnID, dv.data[i], dv.scratch, ) } // UnmarshalTo implements coldata.DatumVec interface. 
func (dv *datumVec) UnmarshalTo(i int, b []byte) error { var err error - dv.data[i], _, err = rowenc.DecodeTableValue(&dv.da, dv.t, b) + dv.data[i], _, err = valueside.Decode(&dv.da, dv.t, b) return err } diff --git a/pkg/col/colserde/BUILD.bazel b/pkg/col/colserde/BUILD.bazel index 2a860239c65a..41ba3d8eff68 100644 --- a/pkg/col/colserde/BUILD.bazel +++ b/pkg/col/colserde/BUILD.bazel @@ -43,15 +43,13 @@ go_test( "//pkg/col/coldatatestutils", "//pkg/col/typeconv", "//pkg/settings/cluster", - "//pkg/sql/catalog/descpb", "//pkg/sql/colmem", "//pkg/sql/execinfra", "//pkg/sql/memsize", "//pkg/sql/randgen", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/types", "//pkg/testutils", - "//pkg/util/encoding", "//pkg/util/json", "//pkg/util/leaktest", "//pkg/util/mon", @@ -60,7 +58,7 @@ go_test( "@com_github_apache_arrow_go_arrow//:arrow", "@com_github_apache_arrow_go_arrow//array", "@com_github_apache_arrow_go_arrow//memory", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/col/colserde/record_batch_test.go b/pkg/col/colserde/record_batch_test.go index 37633b285ce7..58a4c1077072 100644 --- a/pkg/col/colserde/record_batch_test.go +++ b/pkg/col/colserde/record_batch_test.go @@ -23,18 +23,16 @@ import ( "github.com/apache/arrow/go/arrow" "github.com/apache/arrow/go/arrow/array" "github.com/apache/arrow/go/arrow/memory" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/colserde" "github.com/cockroachdb/cockroach/pkg/col/typeconv" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/memsize" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/randutil" @@ -218,7 +216,7 @@ func randomDataFromType(rng *rand.Rand, t *types.T, n int, nullProbability float ) for i := range data { d := randgen.RandDatum(rng, t, false /* nullOk */) - data[i], err = rowenc.EncodeTableValue(data[i], descpb.ColumnID(encoding.NoColumnID), d, scratch) + data[i], err = valueside.Encode(data[i], valueside.NoColumnID, d, scratch) if err != nil { panic(err) } diff --git a/pkg/col/typeconv/BUILD.bazel b/pkg/col/typeconv/BUILD.bazel index be0a22bce0f2..4763a3aa9778 100644 --- a/pkg/col/typeconv/BUILD.bazel +++ b/pkg/col/typeconv/BUILD.bazel @@ -9,6 +9,6 @@ go_library( "//pkg/sql/types", "//pkg/util/duration", "//pkg/util/json", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", ], ) diff --git a/pkg/col/typeconv/typeconv.go b/pkg/col/typeconv/typeconv.go index 7b507791081f..9a81a107be36 100644 --- a/pkg/col/typeconv/typeconv.go +++ b/pkg/col/typeconv/typeconv.go @@ -14,7 +14,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/json" diff --git a/pkg/jobs/BUILD.bazel b/pkg/jobs/BUILD.bazel index 
3fcf3e68f6e8..ac02388b9b51 100644 --- a/pkg/jobs/BUILD.bazel +++ b/pkg/jobs/BUILD.bazel @@ -45,6 +45,7 @@ go_library( "//pkg/sql/sqlliveness", "//pkg/sql/sqlutil", "//pkg/sql/types", + "//pkg/util", "//pkg/util/envutil", "//pkg/util/hlc", "//pkg/util/log", @@ -60,8 +61,8 @@ go_library( "@com_github_cockroachdb_logtags//:logtags", "@com_github_cockroachdb_redact//:redact", "@com_github_gogo_protobuf//types", - "@com_github_gorhill_cronexpr//:cronexpr", "@com_github_prometheus_client_model//go", + "@com_github_robfig_cron_v3//:cron", "@io_opentelemetry_go_otel//attribute", ], ) @@ -129,13 +130,13 @@ go_test( "//pkg/util/timeutil", "//pkg/util/tracing", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_gogo_protobuf//types", "@com_github_google_go_cmp//cmp", - "@com_github_gorhill_cronexpr//:cronexpr", "@com_github_kr_pretty//:pretty", + "@com_github_robfig_cron_v3//:cron", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", ], diff --git a/pkg/jobs/executor_impl_test.go b/pkg/jobs/executor_impl_test.go index a7414d8e67ec..b092b4bbd54b 100644 --- a/pkg/jobs/executor_impl_test.go +++ b/pkg/jobs/executor_impl_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/gorhill/cronexpr" "github.com/stretchr/testify/require" ) @@ -40,7 +39,7 @@ func TestInlineExecutorFailedJobsHandling(t *testing.T) { }{ { onError: jobspb.ScheduleDetails_RETRY_SCHED, - expectedNextRun: cronexpr.MustParse("@daily").Next(h.env.Now()).Round(time.Microsecond), + expectedNextRun: cronMustParse(t, "@daily").Next(h.env.Now()).Round(time.Microsecond), }, { onError: jobspb.ScheduleDetails_RETRY_SOON, diff --git a/pkg/jobs/job_scheduler_test.go b/pkg/jobs/job_scheduler_test.go index b5ff13a50719..1bb8f20104b1 100644 --- a/pkg/jobs/job_scheduler_test.go +++ b/pkg/jobs/job_scheduler_test.go @@ -38,10 +38,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/gogo/protobuf/types" - "github.com/gorhill/cronexpr" + "github.com/robfig/cron/v3" "github.com/stretchr/testify/require" ) +func cronMustParse(t *testing.T, s string) cron.Schedule { + e, err := cron.ParseStandard(s) + require.NoError(t, err) + return e +} + func TestJobSchedulerReschedulesRunning(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -75,7 +81,7 @@ func TestJobSchedulerReschedulesRunning(t *testing.T) { })) // Verify the job has expected nextRun time. - expectedRunTime := cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime := cronMustParse(t, "@hourly").Next(h.env.Now()) loaded := h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) @@ -93,7 +99,7 @@ func TestJobSchedulerReschedulesRunning(t *testing.T) { if wait == jobspb.ScheduleDetails_WAIT { expectedRunTime = h.env.Now().Add(recheckRunningAfter) } else { - expectedRunTime = cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime = cronMustParse(t, "@hourly").Next(h.env.Now()) } loaded = h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) @@ -133,7 +139,7 @@ func TestJobSchedulerExecutesAfterTerminal(t *testing.T) { })) // Verify the job has expected nextRun time. 
- expectedRunTime := cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime := cronMustParse(t, "@hourly").Next(h.env.Now()) loaded := h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) @@ -147,7 +153,7 @@ func TestJobSchedulerExecutesAfterTerminal(t *testing.T) { return s.executeSchedules(ctx, allSchedules, txn) })) - expectedRunTime = cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime = cronMustParse(t, "@hourly").Next(h.env.Now()) loaded = h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) }) @@ -173,7 +179,7 @@ func TestJobSchedulerExecutesAndSchedulesNextRun(t *testing.T) { })) // Verify the job has expected nextRun time. - expectedRunTime := cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime := cronMustParse(t, "@hourly").Next(h.env.Now()) loaded := h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) @@ -187,7 +193,7 @@ func TestJobSchedulerExecutesAndSchedulesNextRun(t *testing.T) { return s.executeSchedules(ctx, allSchedules, txn) })) - expectedRunTime = cronexpr.MustParse("@hourly").Next(h.env.Now()) + expectedRunTime = cronMustParse(t, "@hourly").Next(h.env.Now()) loaded = h.loadSchedule(t, j.ScheduleID()) require.Equal(t, expectedRunTime, loaded.NextRun()) } @@ -550,7 +556,7 @@ func TestJobSchedulerRetriesFailed(t *testing.T) { startTime := h.env.Now() execTime := startTime.Add(time.Hour).Add(time.Second) - cron := cronexpr.MustParse("@hourly") + cron := cronMustParse(t, "@hourly") for _, tc := range []struct { onError jobspb.ScheduleDetails_ErrorHandlingBehavior diff --git a/pkg/jobs/jobs.go b/pkg/jobs/jobs.go index 3cd7c6f73745..745cfd51b2f3 100644 --- a/pkg/jobs/jobs.go +++ b/pkg/jobs/jobs.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -612,7 +613,19 @@ func (j *Job) failed( // a pause-requested job can transition to failed, which may or may not be // acceptable depending on the job. ju.UpdateStatus(StatusFailed) - md.Payload.Error = err.Error() + + // Truncate all errors to avoid large rows in the jobs + // table. 
+ const ( + jobErrMaxRuneCount = 1024 + jobErrTruncatedMarker = " -- TRUNCATED" + ) + errStr := err.Error() + if len(errStr) > jobErrMaxRuneCount { + errStr = util.TruncateString(errStr, jobErrMaxRuneCount) + jobErrTruncatedMarker + } + md.Payload.Error = errStr + md.Payload.FinishedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) ju.UpdatePayload(md.Payload) return nil diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index 1e9b1ab1cbf5..6aaa0a6e6203 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" @@ -1423,6 +1423,20 @@ func TestJobLifecycle(t *testing.T) { t.Fatalf("unexpected: %v", err) } }) + t.Run("huge errors are truncated if marking job as failed", func(t *testing.T) { + hugeErr := strings.Repeat("a", 2048) + truncatedHugeErr := "boom: " + strings.Repeat("a", 1018) + " -- TRUNCATED" + err := errors.Errorf("boom: %s", hugeErr) + job, exp := createDefaultJob() + exp.Error = truncatedHugeErr + if err := job.Failed(ctx, err); err != nil { + t.Fatal(err) + } + if err := exp.verify(job.ID(), jobs.StatusFailed); err != nil { + t.Fatal(err) + } + }) + }) t.Run("cancelable jobs can be paused until finished", func(t *testing.T) { diff --git a/pkg/jobs/jobspb/BUILD.bazel b/pkg/jobs/jobspb/BUILD.bazel index e3efba9db95a..557fbc7b1620 100644 --- a/pkg/jobs/jobspb/BUILD.bazel +++ b/pkg/jobs/jobspb/BUILD.bazel @@ -49,6 +49,7 @@ go_proto_library( "//pkg/clusterversion", "//pkg/roachpb:with-mocks", "//pkg/security", # keep + "//pkg/sql/catalog/catpb", # keep "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scpb", "//pkg/sql/sem/tree", # keep diff --git a/pkg/jobs/jobspb/jobs.proto b/pkg/jobs/jobspb/jobs.proto index 1fd47aeb6d71..2c981b53bfd8 100644 --- a/pkg/jobs/jobspb/jobs.proto +++ b/pkg/jobs/jobspb/jobs.proto @@ -112,27 +112,6 @@ message StreamIngestionProgress { map partition_progress = 2 [(gogoproto.nullable) = false]; } - -message StreamReplicationStatus { - enum StreamStatus { - // Stream is running. Consumers should continue to heartbeat. - STREAM_ACTIVE = 0; - // Stream stopped running. Consumers should stop heartbeating and - // optionally start a new replication stream. - STREAM_INACTIVE = 1; - // Stream replication is paused. Consumers can resume the job and start heartbeating. - STREAM_PAUSED = 2; - // Stream status is unknown. Consumers should retry heartbeating. - UNKNOWN_STREAM_STATUS_RETRY = 4; - } - - StreamStatus stream_status = 1; - - // Current protected timestamp for spans being replicated. It is absent - // when the replication stream is 'STOPPED'. - util.hlc.Timestamp protected_timestamp = 2; -} - message StreamReplicationDetails { // Key spans we are replicating repeated roachpb.Span spans = 1; @@ -418,7 +397,7 @@ message ImportDetails { // If the database being imported into is a multi-region database, then this // field stores the databases' primary region. string database_primary_region = 27 [ - (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.RegionName" + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.RegionName" ]; // next val: 28 @@ -493,7 +472,8 @@ message TypeSchemaChangeProgress { // TypeSchemaChangeDetails is the job detail information for the new schema change job. 
message NewSchemaChangeDetails { - repeated cockroach.sql.schemachanger.scpb.Target targets = 1; + + cockroach.sql.schemachanger.scpb.TargetState target_state = 5 [(gogoproto.nullable) = false]; // BackfillProgress stores the progress for index backfills which may // be ongoing. @@ -539,15 +519,10 @@ message BackfillProgress { // NewSchemaChangeProgress is the persisted progress for the new schema change job. message NewSchemaChangeProgress { - // Tracks the current status of nodes in an ordered manner for - // keeping track of progress. - repeated cockroach.sql.schemachanger.scpb.Status states = 1; - // Statements for the schema changes which this job is currently - // running work for. A transaction can have multiple statements - // for example associated. - repeated cockroach.sql.schemachanger.scpb.Statement statements = 2; - // Authorization information for the schema change. - cockroach.sql.schemachanger.scpb.Authorization authorization = 3; + + // Current tracks the current state of elements in the same order as in the job + // details. + repeated cockroach.sql.schemachanger.scpb.Status current = 1; } // AutoSpanConfigReconciliationDetails is the job detail information for the diff --git a/pkg/jobs/scheduled_job.go b/pkg/jobs/scheduled_job.go index 30c8578f2ccc..cf6a520ffc3a 100644 --- a/pkg/jobs/scheduled_job.go +++ b/pkg/jobs/scheduled_job.go @@ -27,7 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" - "github.com/gorhill/cronexpr" + "github.com/robfig/cron/v3" ) // scheduledJobRecord is a reflective representation of a row in @@ -194,12 +194,13 @@ func (j *ScheduledJob) Frequency() (time.Duration, error) { return 0, errors.Newf( "schedule %d is not periodic", j.rec.ScheduleID) } - expr, err := cronexpr.Parse(j.rec.ScheduleExpr) + expr, err := cron.ParseStandard(j.rec.ScheduleExpr) if err != nil { return 0, errors.Wrapf(err, "parsing schedule expression: %q; it must be a valid cron expression", j.rec.ScheduleExpr) } + next := expr.Next(j.env.Now()) nextNext := expr.Next(next) return nextNext.Sub(next), nil @@ -211,7 +212,7 @@ func (j *ScheduledJob) ScheduleNextRun() error { return errors.Newf( "cannot set next run for schedule %d (empty schedule)", j.rec.ScheduleID) } - expr, err := cronexpr.Parse(j.rec.ScheduleExpr) + expr, err := cron.ParseStandard(j.rec.ScheduleExpr) if err != nil { return errors.Wrapf(err, "parsing schedule expression: %q", j.rec.ScheduleExpr) } diff --git a/pkg/keys/BUILD.bazel b/pkg/keys/BUILD.bazel index 879f9bad3c56..b399bfb762ca 100644 --- a/pkg/keys/BUILD.bazel +++ b/pkg/keys/BUILD.bazel @@ -1,4 +1,5 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//build:STRINGER.bzl", "stringer") go_library( name = "keys", @@ -11,6 +12,7 @@ go_library( "spans.go", "sql.go", "system.go", + ":gen-comment-type-stringer", #keep ], importpath = "github.com/cockroachdb/cockroach/pkg/keys", visibility = ["//visibility:public"], @@ -39,8 +41,14 @@ go_test( "//pkg/util/keysutil", "//pkg/util/leaktest", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], ) + +stringer( + name = "gen-comment-type-stringer", + src = "constants.go", + typ = "CommentType", +) diff --git a/pkg/keys/commenttype_string.go b/pkg/keys/commenttype_string.go new file mode 100644 index 000000000000..e060f87f8a74 --- /dev/null +++ 
b/pkg/keys/commenttype_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer"; DO NOT EDIT. + +package keys + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[DatabaseCommentType-0] + _ = x[TableCommentType-1] + _ = x[ColumnCommentType-2] + _ = x[IndexCommentType-3] + _ = x[SchemaCommentType-4] + _ = x[ConstraintCommentType-5] +} + +const _CommentType_name = "DatabaseCommentTypeTableCommentTypeColumnCommentTypeIndexCommentTypeSchemaCommentTypeConstraintCommentType" + +var _CommentType_index = [...]uint8{0, 19, 35, 52, 68, 85, 106} + +func (i CommentType) String() string { + if i < 0 || i >= CommentType(len(_CommentType_index)-1) { + return "CommentType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _CommentType_name[_CommentType_index[i]:_CommentType_index[i+1]] +} diff --git a/pkg/keys/constants.go b/pkg/keys/constants.go index 4df92b77c917..ae537cc96196 100644 --- a/pkg/keys/constants.go +++ b/pkg/keys/constants.go @@ -407,14 +407,27 @@ const ( TenantUsageTableID = 45 SQLInstancesTableID = 46 SpanConfigurationsTableID = 47 +) + +// CommentType the type of the schema object on which a comment has been +// applied. +type CommentType int - // CommentType is type for system.comments - DatabaseCommentType = 0 - TableCommentType = 1 - ColumnCommentType = 2 - IndexCommentType = 3 - SchemaCommentType = 4 - ConstraintCommentType = 5 +//go:generate stringer --type CommentType + +const ( + // DatabaseCommentType comment on a database. + DatabaseCommentType CommentType = 0 + // TableCommentType comment on a table/view/sequence. + TableCommentType CommentType = 1 + // ColumnCommentType comment on a column. + ColumnCommentType CommentType = 2 + // IndexCommentType comment on an index. + IndexCommentType CommentType = 3 + // SchemaCommentType comment on a schema. + SchemaCommentType CommentType = 4 + // ConstraintCommentType comment on a constraint. 
+ ConstraintCommentType CommentType = 5 ) const ( diff --git a/pkg/keys/printer_test.go b/pkg/keys/printer_test.go index 4a25b82428cb..4ce284200607 100644 --- a/pkg/keys/printer_test.go +++ b/pkg/keys/printer_test.go @@ -21,7 +21,7 @@ import ( "time" "unicode/utf8" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/bitarray" diff --git a/pkg/kv/BUILD.bazel b/pkg/kv/BUILD.bazel index 6bf800ba83cc..2100cf9da850 100644 --- a/pkg/kv/BUILD.bazel +++ b/pkg/kv/BUILD.bazel @@ -37,7 +37,7 @@ go_library( "//pkg/util/timeutil", "//pkg/util/tracing", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/kv/bulk/buffering_adder.go b/pkg/kv/bulk/buffering_adder.go index 055063788dae..f327156a2908 100644 --- a/pkg/kv/bulk/buffering_adder.go +++ b/pkg/kv/bulk/buffering_adder.go @@ -109,6 +109,7 @@ func MakeBulkAdder( disallowShadowingBelow: opts.DisallowShadowingBelow, splitAfter: opts.SplitAndScatterAfter, batchTS: opts.BatchTimestamp, + writeAtBatchTS: opts.WriteAtRequestTime, }, timestamp: timestamp, curBufferSize: opts.MinBufferSize, diff --git a/pkg/kv/bulk/sst_batcher.go b/pkg/kv/bulk/sst_batcher.go index affc8e75a023..42ac83a03326 100644 --- a/pkg/kv/bulk/sst_batcher.go +++ b/pkg/kv/bulk/sst_batcher.go @@ -96,6 +96,9 @@ type SSTBatcher struct { // produced SSTs. batchTS hlc.Timestamp + // writeAtBatchTS is passed to the writeAtBatchTs argument to db.AddSStable. + writeAtBatchTS bool + // The rest of the fields accumulated state as opposed to configuration. Some, // like totalRows, are accumulated _across_ batches and are not reset between // batches when Reset() is called. @@ -135,9 +138,14 @@ func MakeSSTBatcher( settings *cluster.Settings, flushBytes func() int64, disallowShadowingBelow hlc.Timestamp, + writeAtBatchTs bool, ) (*SSTBatcher, error) { b := &SSTBatcher{ - db: db, settings: settings, maxSize: flushBytes, disallowShadowingBelow: disallowShadowingBelow, + db: db, + settings: settings, + maxSize: flushBytes, + disallowShadowingBelow: disallowShadowingBelow, + writeAtBatchTS: writeAtBatchTs, } err := b.Reset(ctx) return b, err @@ -347,7 +355,7 @@ func (b *SSTBatcher) doFlush(ctx context.Context, reason int, nextKey roachpb.Ke } beforeSend := timeutil.Now() - files, err := AddSSTable(ctx, b.db, start, end, b.sstFile.Data(), b.disallowShadowingBelow, b.ms, b.settings, b.batchTS) + files, err := AddSSTable(ctx, b.db, start, end, b.sstFile.Data(), b.disallowShadowingBelow, b.ms, b.settings, b.batchTS, b.writeAtBatchTS) if err != nil { return err } @@ -442,6 +450,7 @@ func AddSSTable( ms enginepb.MVCCStats, settings *cluster.Settings, batchTs hlc.Timestamp, + writeAtBatchTs bool, ) (int, error) { var files int now := timeutil.Now() @@ -489,7 +498,7 @@ func AddSSTable( // This will fail if the range has split but we'll check for that below. 
err = db.AddSSTable(ctx, item.start, item.end, item.sstBytes, false, /* disallowConflicts */ !item.disallowShadowingBelow.IsEmpty(), item.disallowShadowingBelow, &item.stats, - ingestAsWriteBatch, batchTs, false /* writeAtBatchTs */) + ingestAsWriteBatch, batchTs, writeAtBatchTs) if err == nil { log.VEventf(ctx, 3, "adding %s AddSSTable [%s,%s) took %v", sz(len(item.sstBytes)), item.start, item.end, timeutil.Since(before)) return nil diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go index 9e0b01244d7d..562e5d63eacb 100644 --- a/pkg/kv/bulk/sst_batcher_test.go +++ b/pkg/kv/bulk/sst_batcher_test.go @@ -330,7 +330,14 @@ func TestAddBigSpanningSSTWithSplits(t *testing.T) { t.Logf("Adding %dkb sst spanning %d splits from %v to %v", len(sst)/kb, len(splits), start, end) if _, err := bulk.AddSSTable( - ctx, mock, start, end, sst, hlc.Timestamp{}, enginepb.MVCCStats{}, cluster.MakeTestingClusterSettings(), hlc.Timestamp{}, + ctx, mock, + start, end, + sst, + hlc.Timestamp{}, + enginepb.MVCCStats{}, + cluster.MakeTestingClusterSettings(), + hlc.Timestamp{}, + false, ); err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvclient/kvcoord/BUILD.bazel b/pkg/kv/kvclient/kvcoord/BUILD.bazel index 999764845e1a..080a7809ab36 100644 --- a/pkg/kv/kvclient/kvcoord/BUILD.bazel +++ b/pkg/kv/kvclient/kvcoord/BUILD.bazel @@ -138,6 +138,7 @@ go_test( ], data = glob(["testdata/**"]), embed = [":with-mocks"], # keep + tags = ["no-remote"], deps = [ "//build/bazelutil:noop", "//pkg/base", diff --git a/pkg/kv/kvclient/kvcoord/batch.go b/pkg/kv/kvclient/kvcoord/batch.go index 2f741385c34d..7da549d933c0 100644 --- a/pkg/kv/kvclient/kvcoord/batch.go +++ b/pkg/kv/kvclient/kvcoord/batch.go @@ -18,7 +18,7 @@ import ( var emptySpan = roachpb.Span{} -// truncate restricts all requests to the given key range and returns new, +// Truncate restricts all requests to the given key range and returns new, // truncated, requests. All returned requests are "truncated" to the given span, // and requests which are found to not overlap the given span at all are // removed. A mapping of response index to request index is returned. For @@ -27,8 +27,8 @@ var emptySpan = roachpb.Span{} // reqs = Put[a], Put[c], Put[b], // rs = [a,bb], // -// then truncate(reqs,rs) returns (Put[a], Put[b]) and positions [0,2]. -func truncate( +// then Truncate(reqs,rs) returns (Put[a], Put[b]) and positions [0,2]. +func Truncate( reqs []roachpb.RequestUnion, rs roachpb.RSpan, ) ([]roachpb.RequestUnion, []int, error) { truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) { @@ -191,18 +191,18 @@ func prev(reqs []roachpb.RequestUnion, k roachpb.RKey) (roachpb.RKey, error) { return candidate, nil } -// next gives the left boundary of the union of all requests which don't affect +// Next gives the left boundary of the union of all requests which don't affect // keys less than the given key. Note that the left boundary is inclusive, that // is, the returned RKey is the inclusive left endpoint of the keys the request // should operate on next. // -// Informally, a call `next(reqs, k)` means: we've already executed the parts of +// Informally, a call `Next(reqs, k)` means: we've already executed the parts of // `reqs` that intersect `[KeyMin, k)`; please tell me how far to the right the // next relevant request begins. // // TODO(tschottdorf): again, better on BatchRequest itself, but can't pull // 'keys' into 'proto'. 
-func next(reqs []roachpb.RequestUnion, k roachpb.RKey) (roachpb.RKey, error) { +func Next(reqs []roachpb.RequestUnion, k roachpb.RKey) (roachpb.RKey, error) { candidate := roachpb.RKeyMax for _, union := range reqs { inner := union.GetInner() diff --git a/pkg/kv/kvclient/kvcoord/batch_test.go b/pkg/kv/kvclient/kvcoord/batch_test.go index b0ccd8f83651..2fae3611dcaf 100644 --- a/pkg/kv/kvclient/kvcoord/batch_test.go +++ b/pkg/kv/kvclient/kvcoord/batch_test.go @@ -193,7 +193,7 @@ func TestBatchPrevNext(t *testing.T) { args.Key, args.EndKey = span.Key, span.EndKey ba.Add(args) } - if next, err := next(ba.Requests, roachpb.RKey(test.key)); err != nil { + if next, err := Next(ba.Requests, roachpb.RKey(test.key)); err != nil { t.Error(err) } else if !bytes.Equal(next, roachpb.Key(test.expFW)) { t.Errorf("next: expected %q, got %q", test.expFW, next) diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index 184cc6e1f55a..2fa16df88a8d 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -1318,7 +1318,7 @@ func (ds *DistSender) divideAndSendBatchToRanges( // one, and unless both descriptors are stale, the next descriptor's // StartKey would move us to the beginning of the current range, // resulting in a duplicate scan. - seekKey, err = next(ba.Requests, ri.Desc().EndKey) + seekKey, err = Next(ba.Requests, ri.Desc().EndKey) nextRS.Key = seekKey } if err != nil { @@ -1509,7 +1509,7 @@ func (ds *DistSender) sendPartialBatch( if err != nil { return response{pErr: roachpb.NewError(err)} } - ba.Requests, positions, err = truncate(ba.Requests, rs) + ba.Requests, positions, err = Truncate(ba.Requests, rs) if len(positions) == 0 && err == nil { // This shouldn't happen in the wild, but some tests exercise it. return response{ diff --git a/pkg/kv/kvclient/kvcoord/send_test.go b/pkg/kv/kvclient/kvcoord/send_test.go index 30d4b112a0c1..80a2b197c905 100644 --- a/pkg/kv/kvclient/kvcoord/send_test.go +++ b/pkg/kv/kvclient/kvcoord/send_test.go @@ -91,6 +91,12 @@ func (n Node) UpdateSpanConfigs( panic("unimplemented") } +func (n Node) TenantSettings( + *roachpb.TenantSettingsRequest, roachpb.Internal_TenantSettingsServer, +) error { + panic("unimplemented") +} + // TestSendToOneClient verifies that Send correctly sends a request // to one server using the heartbeat RPC. 
func TestSendToOneClient(t *testing.T) { diff --git a/pkg/kv/kvclient/kvcoord/transport_test.go b/pkg/kv/kvclient/kvcoord/transport_test.go index 63960f6fa1c5..51ff462b7ed5 100644 --- a/pkg/kv/kvclient/kvcoord/transport_test.go +++ b/pkg/kv/kvclient/kvcoord/transport_test.go @@ -208,3 +208,9 @@ func (m *mockInternalClient) UpdateSpanConfigs( ) (*roachpb.UpdateSpanConfigsResponse, error) { return nil, fmt.Errorf("unsupported UpdateSpanConfigs call") } + +func (m *mockInternalClient) TenantSettings( + context.Context, *roachpb.TenantSettingsRequest, ...grpc.CallOption, +) (roachpb.Internal_TenantSettingsClient, error) { + return nil, fmt.Errorf("unsupported TenantSettings call") +} diff --git a/pkg/kv/kvclient/kvcoord/truncate_test.go b/pkg/kv/kvclient/kvcoord/truncate_test.go index 4bab7cdd144f..04127f59c25d 100644 --- a/pkg/kv/kvclient/kvcoord/truncate_test.go +++ b/pkg/kv/kvclient/kvcoord/truncate_test.go @@ -164,7 +164,7 @@ func TestTruncate(t *testing.T) { t.Errorf("%d: intersection failure: %v", i, err) continue } - reqs, pos, err := truncate(original.Requests, rs) + reqs, pos, err := Truncate(original.Requests, rs) if err != nil || test.err != "" { if !testutils.IsError(err, test.err) { t.Errorf("%d: %v (expected: %q)", i, err, test.err) diff --git a/pkg/kv/kvclient/kvstreamer/BUILD.bazel b/pkg/kv/kvclient/kvstreamer/BUILD.bazel new file mode 100644 index 000000000000..6718c4893851 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/BUILD.bazel @@ -0,0 +1,56 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "kvstreamer", + srcs = [ + "avg_response_estimator.go", + "budget.go", + "streamer.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer", + visibility = ["//visibility:public"], + deps = [ + "//pkg/keys", + "//pkg/kv", + "//pkg/kv/kvclient/kvcoord:with-mocks", + "//pkg/kv/kvserver/concurrency/lock", + "//pkg/roachpb:with-mocks", + "//pkg/settings", + "//pkg/settings/cluster", + "//pkg/util/admission", + "//pkg/util/mon", + "//pkg/util/quotapool", + "//pkg/util/stop", + "//pkg/util/syncutil", + "@com_github_cockroachdb_errors//:errors", + ], +) + +go_test( + name = "kvstreamer_test", + srcs = [ + "avg_response_estimator_test.go", + "main_test.go", + "streamer_test.go", + ], + embed = [":kvstreamer"], + deps = [ + "//pkg/base", + "//pkg/kv", + "//pkg/kv/kvclient/kvcoord:with-mocks", + "//pkg/kv/kvserver/concurrency/lock", + "//pkg/roachpb:with-mocks", + "//pkg/security", + "//pkg/security/securitytest", + "//pkg/server", + "//pkg/settings/cluster", + "//pkg/testutils", + "//pkg/testutils/serverutils", + "//pkg/testutils/skip", + "//pkg/util/leaktest", + "//pkg/util/log", + "//pkg/util/mon", + "//pkg/util/randutil", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/kv/kvclient/kvstreamer/avg_response_estimator.go b/pkg/kv/kvclient/kvstreamer/avg_response_estimator.go new file mode 100644 index 000000000000..56c45eff01d4 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/avg_response_estimator.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package kvstreamer + +// avgResponseEstimator is a helper that estimates the average size of responses +// received by the Streamer. It is **not** thread-safe. +type avgResponseEstimator struct { + // responseBytes tracks the total footprint of all responses that the + // Streamer has already received. + responseBytes int64 + numResponses int64 +} + +// TODO(yuzefovich): use the optimizer-driven estimates. +const initialAvgResponseSize = 1 << 10 // 1KiB + +func (e *avgResponseEstimator) getAvgResponseSize() int64 { + if e.numResponses == 0 { + return initialAvgResponseSize + } + // TODO(yuzefovich): we currently use a simple average over the received + // responses, but it is likely to be suboptimal because it would be unfair + // to "large" batches that come in late (i.e. it would not be reactive + // enough). Consider using another function here. + return e.responseBytes / e.numResponses +} + +// update updates the actual information of the estimator based on numResponses +// responses that took up responseBytes bytes and correspond to a single +// BatchResponse. +func (e *avgResponseEstimator) update(responseBytes int64, numResponses int64) { + e.responseBytes += responseBytes + e.numResponses += numResponses +} diff --git a/pkg/kv/kvclient/kvstreamer/avg_response_estimator_test.go b/pkg/kv/kvclient/kvstreamer/avg_response_estimator_test.go new file mode 100644 index 000000000000..7c3337f59f26 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/avg_response_estimator_test.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvstreamer + +import ( + "math" + "testing" + + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/stretchr/testify/require" +) + +func TestAvgResponseEstimator(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + var e avgResponseEstimator + + // Before receiving any responses, we should be using the initial estimate. + require.Equal(t, int64(initialAvgResponseSize), e.getAvgResponseSize()) + + // Simulate receiving a single response. + firstResponseSize := int64(42) + e.update(firstResponseSize, 1) + // The estimate should now be exactly the size of that single response. + require.Equal(t, firstResponseSize, e.getAvgResponseSize()) + + // Simulate receiving 100 small BatchResponses. + smallResponseSize := int64(63) + for i := 0; i < 100; i++ { + e.update(smallResponseSize*5, 5) + } + // The estimate should now be pretty close to the size of a single response + // in the small BatchResponse. + diff := smallResponseSize - e.getAvgResponseSize() + require.True(t, math.Abs(float64(diff))/float64(smallResponseSize) < 0.05) + + // Now simulate receiving 10 large BatchResponses. + largeResponseSize := int64(17) + for i := 0; i < 10; i++ { + e.update(largeResponseSize*1000, 1000) + } + // The estimate should now be pretty close to the size of a single response + // in the large BatchResponse. 
+ diff = largeResponseSize - e.getAvgResponseSize() + require.True(t, math.Abs(float64(diff))/float64(smallResponseSize) < 0.15) +} diff --git a/pkg/kv/kvclient/kvstreamer/budget.go b/pkg/kv/kvclient/kvstreamer/budget.go new file mode 100644 index 000000000000..e97a8d751e1a --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/budget.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvstreamer + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" +) + +// budget abstracts the memory budget that is provided to the Streamer by its +// client. +// +// This struct is a wrapper on top of mon.BoundAccount because we want to +// support the notion of budget "going in debt". This can occur in a degenerate +// case when a single large row exceeds the provided limit. The Streamer is +// expected to have only a single request in flight in this case. Additionally, +// the budget provides blocking (via waitCh) until it gets out of debt. +type budget struct { + mu struct { + // If the Streamer's mutex also needs to be locked, the budget's mutex + // must be acquired first. + syncutil.Mutex + // acc represents the current reservation of this budget against the + // root memory pool. + acc *mon.BoundAccount + } + // limitBytes is the maximum amount of bytes that this budget should reserve + // against acc, i.e. acc.Used() should not exceed limitBytes. However, in a + // degenerate case of a single large row, the budget can go into debt and + // acc.Used() might exceed limitBytes. + limitBytes int64 + // waitCh is used by the main loop of the workerCoordinator to block until + // available() becomes positive (until some release calls occur). + waitCh chan struct{} +} + +// newBudget creates a new budget with the specified limit. The limit determines +// the maximum amount of memory this budget is allowed to use (i.e. it'll be +// used lazily, as needed). +// +// The budget itself is responsible for staying under the limit, so acc should +// be bound to an unlimited memory monitor. This is needed in order to support +// the case of budget going into debt. Note that although it is an "unlimited +// memory monitor", the monitor is still limited by --max-sql-memory in size +// eventually because all monitors are descendants of the root SQL monitor. +// +// The budget takes ownership of the memory account, and the caller is allowed +// to interact with the account only after canceling the Streamer (because +// memory accounts are not thread-safe). +func newBudget(acc *mon.BoundAccount, limitBytes int64) *budget { + b := budget{ + limitBytes: limitBytes, + waitCh: make(chan struct{}), + } + b.mu.acc = acc + return &b +} + +// available returns how many bytes are currently available in the budget. The +// answer can be negative, in case the Streamer has used un-budgeted memory +// (e.g. one result was very large putting the budget in debt). +// +// Note that it's possible that actually available budget is less than the +// number returned - this might occur if --max-sql-memory root pool is almost +// used up. 
+func (b *budget) available() int64 { + b.mu.Lock() + defer b.mu.Unlock() + return b.limitBytes - b.mu.acc.Used() +} + +// consume draws bytes from the available budget. An error is returned if the +// root pool budget is used up such that the budget's limit cannot be fully +// reserved. +// - allowDebt indicates whether the budget is allowed to go into debt on this +// consumption. In other words, if allowDebt is true, then acc's reservation is +// allowed to exceed limitBytes (but the error is still returned if the root +// pool budget is exceeded). Note that allowDebt value applies only to this +// consume() call and is not carried forward. +// +// b's mutex should not be held when calling this method. +func (b *budget) consume(ctx context.Context, bytes int64, allowDebt bool) error { + b.mu.Lock() + defer b.mu.Unlock() + return b.consumeLocked(ctx, bytes, allowDebt) +} + +// consumeLocked is the same as consume but assumes that the b's lock is held. +func (b *budget) consumeLocked(ctx context.Context, bytes int64, allowDebt bool) error { + b.mu.AssertHeld() + // If we're asked to not exceed the limit (and the limit is greater than + // five bytes - limits of five bytes or less are treated as a special case + // for "forced disk spilling" scenarios like in logic tests), we have to + // check whether we'll stay within the budget. + if !allowDebt && b.limitBytes > 5 { + if b.mu.acc.Used()+bytes > b.limitBytes { + return mon.MemoryResource.NewBudgetExceededError(bytes, b.mu.acc.Used(), b.limitBytes) + } + } + return b.mu.acc.Grow(ctx, bytes) +} + +// release returns bytes to the available budget. +func (b *budget) release(ctx context.Context, bytes int64) { + b.mu.Lock() + defer b.mu.Unlock() + b.mu.acc.Shrink(ctx, bytes) + if b.limitBytes > b.mu.acc.Used() { + // Since we now have some available budget, we non-blockingly send on + // the wait channel to notify the mainCoordinator about it. + select { + case b.waitCh <- struct{}{}: + default: + } + } +} diff --git a/pkg/kv/kvclient/kvstreamer/main_test.go b/pkg/kv/kvclient/kvstreamer/main_test.go new file mode 100644 index 000000000000..40dc560be5f8 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/main_test.go @@ -0,0 +1,31 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvstreamer_test + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +//go:generate ../../../util/leaktest/add-leaktest.sh *_test.go + +func TestMain(m *testing.M) { + security.SetAssetLoader(securitytest.EmbeddedAssets) + randutil.SeedForTests() + serverutils.InitTestServerFactory(server.TestServerFactory) + os.Exit(m.Run()) +} diff --git a/pkg/kv/kvclient/kvstreamer/streamer.go b/pkg/kv/kvclient/kvstreamer/streamer.go new file mode 100644 index 000000000000..08aa4b00fea6 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/streamer.go @@ -0,0 +1,1175 @@ +// Copyright 2022 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvstreamer + +import ( + "context" + "runtime" + "sort" + "sync" + "unsafe" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/util/admission" + "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/cockroach/pkg/util/quotapool" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" +) + +// OperationMode describes the mode of operation of the Streamer. +type OperationMode int + +const ( + _ OperationMode = iota + // InOrder is the mode of operation in which the results are delivered in + // the order in which the requests were handed off to the Streamer. This + // mode forces the Streamer to buffer the results it produces through its + // internal parallel execution of the requests. Since the results of the + // concurrent requests can come in an arbitrary order, they are buffered and + // might end up being dropped (resulting in wasted/duplicate work) to make + // space for the results at the front of the line. This would occur when the + // budget limitBytes is reached and the size estimates that lead to too much + // concurrency in the execution were wrong. + InOrder + // OutOfOrder is the mode of operation in which the results are delivered in + // the order in which they're produced. The caller will use the keys field + // of each Result to associate it with the corresponding requests. This mode + // of operation lets the Streamer reuse the memory budget as quickly as + // possible. + OutOfOrder +) + +// Remove an unused warning for now. +// TODO(yuzefovich): remove this when supported. +var _ = InOrder + +// Result describes the result of performing a single KV request. +// +// The recipient of the Result is required to call Release() when the Result is +// not in use any more so that its memory is returned to the Streamer's budget. +type Result struct { + // GetResp and ScanResp represent the response to a request. Only one of the + // two will be populated. + // + // The responses are to be considered immutable; the Streamer might hold on + // to the respective memory. Calling Result.Release() tells the Streamer + // that the response is no longer needed. + GetResp *roachpb.GetResponse + // ScanResp can contain a partial response to a ScanRequest (when Complete + // is false). In that case, there will be a further result with the + // continuation; that result will use the same Key. Notably, SQL rows will + // never be split across multiple results. + ScanResp struct { + *roachpb.ScanResponse + // If the Result represents a scan result, Complete indicates whether + // this is the last response for the respective scan, or if there are + // more responses to come. In any case, ScanResp never contains partial + // rows (i.e. 
a single row is never split into different Results). + // + // When running in InOrder mode, Results for a single scan will be + // delivered in key order (in addition to results for different scans + // being delivered in request order). When running in OutOfOrder mode, + // Results for a single scan can be delivered out of key order (in + // addition to results for different scans being delivered out of + // request order). + Complete bool + } + // EnqueueKeysSatisfied identifies the requests that this Result satisfies. + // In OutOfOrder mode, a single Result can satisfy multiple identical + // requests. In InOrder mode a Result can only satisfy multiple consecutive + // requests. + EnqueueKeysSatisfied []int + // memoryTok describes the memory reservation of this Result that needs to + // be released back to the budget when the Result is Release()'d. + memoryTok struct { + budget *budget + toRelease int64 + } + // position tracks the ordinal among all originally enqueued requests that + // this result satisfies. See singleRangeBatch.positions for more details. + // + // If Streamer.Enqueue() was called with nil enqueueKeys argument, then + // EnqueueKeysSatisfied will exactly contain position; if non-nil + // enqueueKeys argument was passed, then position is used as an ordinal to + // lookup into enqueueKeys to populate EnqueueKeysSatisfied. + // TODO(yuzefovich): this might need to be []int when non-unique requests + // are supported. + position int +} + +// Hints provides different hints to the Streamer for optimization purposes. +type Hints struct { + // UniqueRequests tells the Streamer that the requests will be unique. As + // such, there's no point in de-duping them or caching results. + UniqueRequests bool +} + +// Release needs to be called by the recipient of the Result exactly once when +// this Result is not needed any more. If this was the last (or only) reference +// to this Result, the memory used by this Result is made available in the +// Streamer's budget. +// +// Internally, Results are refcounted. Multiple Results referencing the same +// GetResp/ScanResp can be returned from separate `GetResults()` calls, and the +// Streamer internally does buffering and caching of Results - which also +// contributes to the refcounts. +func (r Result) Release(ctx context.Context) { + if r.memoryTok.budget != nil { + r.memoryTok.budget.release(ctx, r.memoryTok.toRelease) + } +} + +// Streamer provides a streaming oriented API for reading from the KV layer. At +// the moment the Streamer only works when SQL rows are comprised of a single KV +// (i.e. a single column family). +// TODO(yuzefovich): lift the restriction on a single column family once KV is +// updated so that rows are never split across different BatchResponses when +// TargetBytes limitBytes is exceeded. +// +// The example usage is roughly as follows: +// +// s := NewStreamer(...) +// s.Init(OperationMode, Hints) +// ... +// for needMoreKVs { +// // Check whether there are results to the previously enqueued requests. +// // This will block if no results are available, but there are some +// // enqueued requests. +// results, err := s.GetResults(ctx) +// // err check +// ... +// if len(results) > 0 { +// processResults(results) +// // return to the client +// ... +// // when results are no longer needed, Release() them +// } +// // All previously enqueued requests have already been responded to. +// if moreRequestsToEnqueue { +// err := s.Enqueue(ctx, requests, enqueueKeys) +// // err check +// ... 
+// } else { +// // done +// ... +// } +// } +// ... +// s.Close() +// +// The Streamer builds on top of the BatchRequest API provided by the DistSender +// and aims to allow for executing the requests in parallel (to improve the +// performance) while setting the memory limits on those requests (for stability +// purposes). +// +// The parallelism is achieved by splitting the incoming requests into +// single-range batches where each such batch will hit a fast-path in the +// DistSender (unless there have been changes to range boundaries). Since these +// batches are executed concurrently, the LeafTxns are used. +// +// The memory limit handling is achieved by the Streamer guessing the size of +// the response for each request and setting TargetBytes accordingly. The +// concurrency of the Streamer is limited by its memory limit. +// +// The Streamer additionally utilizes different optimizations to improve the +// performance: +// - when possible, sorting requests in key order to take advantage of low-level +// Pebble locality optimizations +// - when necessary, buffering the responses received out of order +// - when necessary, caching the responses to short-circuit repeated lookups. +// TODO(yuzefovich): add an optimization of transparent refreshes when there is +// a single Streamer in the local flow. +// TODO(yuzefovich): support pipelining of Enqueue and GetResults calls. +type Streamer struct { + distSender *kvcoord.DistSender + stopper *stop.Stopper + + mode OperationMode + hints Hints + budget *budget + + coordinator workerCoordinator + coordinatorStarted bool + coordinatorCtxCancel context.CancelFunc + + waitGroup sync.WaitGroup + + enqueueKeys []int + + // waitForResults is used to block GetResults() call until some results are + // available. + waitForResults chan struct{} + + mu struct { + // If the budget's mutex also needs to be locked, the budget's mutex + // must be acquired first. + syncutil.Mutex + + avgResponseEstimator avgResponseEstimator + + // requestsToServe contains all single-range sub-requests that have yet + // to be served. + // TODO(yuzefovich): consider using ring.Buffer instead of a slice. + requestsToServe []singleRangeBatch + + // numRangesLeftPerScanRequest tracks how many ranges a particular + // originally enqueued ScanRequest touches, but scanning of those ranges + // isn't complete. It is allocated lazily when the first ScanRequest is + // encountered in Enqueue. + numRangesLeftPerScanRequest []int + + // numEnqueuedRequests tracks the number of the originally enqueued + // requests. + numEnqueuedRequests int + + // numCompleteRequests tracks the number of the originally enqueued + // requests that have already been completed. + numCompleteRequests int + + // numRequestsInFlight tracks the number of single-range batches that + // are currently being served asynchronously (i.e. those that have + // already left requestsToServe queue, but for which we haven't received + // the results yet). + // TODO(yuzefovich): check whether the contention on mu when accessing + // this field is sufficient to justify pulling it out into an atomic. + numRequestsInFlight int + + // results are the results of already completed requests that haven't + // been returned by GetResults() yet. + results []Result + err error + } +} + +// streamerConcurrencyLimit is an upper bound on the number of asynchronous +// requests that a single Streamer can have in flight. 
The default value for +// this setting is chosen arbitrarily as 1/8th of the default value for the +// senderConcurrencyLimit. +var streamerConcurrencyLimit = settings.RegisterIntSetting( + settings.TenantWritable, + "kv.streamer.concurrency_limit", + "maximum number of asynchronous requests by a single streamer", + max(128, int64(8*runtime.GOMAXPROCS(0))), + settings.NonNegativeInt, +) + +func max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +// NewStreamer creates a new Streamer. +// +// limitBytes determines the maximum amount of memory this Streamer is allowed +// to use (i.e. it'll be used lazily, as needed). The more memory it has, the +// higher its internal concurrency and throughput. +// +// acc should be bound to an unlimited memory monitor, and the Streamer itself +// is responsible for staying under the limitBytes. +// +// The Streamer takes ownership of the memory account, and the caller is allowed +// to interact with the account only after canceling the Streamer (because +// memory accounts are not thread-safe). +func NewStreamer( + distSender *kvcoord.DistSender, + stopper *stop.Stopper, + txn *kv.Txn, + st *cluster.Settings, + lockWaitPolicy lock.WaitPolicy, + limitBytes int64, + acc *mon.BoundAccount, +) *Streamer { + s := &Streamer{ + distSender: distSender, + stopper: stopper, + budget: newBudget(acc, limitBytes), + } + s.coordinator = workerCoordinator{ + s: s, + txn: txn, + lockWaitPolicy: lockWaitPolicy, + requestAdmissionHeader: txn.AdmissionHeader(), + responseAdmissionQ: txn.DB().SQLKVResponseAdmissionQ, + } + // TODO(yuzefovich): consider lazily allocating this IntPool only when + // enqueued requests span multiple batches. + s.coordinator.asyncSem = quotapool.NewIntPool( + "single Streamer async concurrency", + uint64(streamerConcurrencyLimit.Get(&st.SV)), + ) + s.coordinator.mu.hasWork = sync.NewCond(&s.coordinator.mu) + streamerConcurrencyLimit.SetOnChange(&st.SV, func(ctx context.Context) { + s.coordinator.asyncSem.UpdateCapacity(uint64(streamerConcurrencyLimit.Get(&st.SV))) + }) + stopper.AddCloser(s.coordinator.asyncSem.Closer("stopper")) + return s +} + +// Init initializes the Streamer. +// +// OperationMode controls the order in which results are delivered to the +// client. When possible, prefer OutOfOrder mode. +// +// Hints can be used to hint the aggressiveness of the caching policy. In +// particular, it can be used to disable caching when the client knows that all +// looked-up keys are unique (e.g. in the case of an index-join). +func (s *Streamer) Init(mode OperationMode, hints Hints) { + if mode != OutOfOrder { + panic(errors.AssertionFailedf("only OutOfOrder mode is supported")) + } + s.mode = mode + if !hints.UniqueRequests { + panic(errors.AssertionFailedf("only unique requests are currently supported")) + } + s.hints = hints + s.waitForResults = make(chan struct{}, 1) +} + +// Enqueue dispatches multiple requests for execution. Results are delivered +// through the GetResults call. If enqueueKeys is not nil, it needs to contain +// one ID for each request; responses will reference that ID so that the client +// can associate them to the requests. If enqueueKeys is nil, then the responses +// will reference the ordinals of the corresponding requests among reqs. +// +// Multiple requests can specify the same key. In this case, their respective +// responses will also reference the same key. 
This is useful, for example, for +// "range-based lookup joins" where multiple spans are read in the context of +// the same input-side row (see multiSpanGenerator implementation of +// rowexec.joinReaderSpanGenerator interface for more details). +// +// The Streamer takes over the given requests, will perform the memory +// accounting against its budget and might modify the requests in place. +// +// In InOrder operation mode, responses will be delivered in reqs order. +// +// It is the caller's responsibility to ensure that the memory footprint of reqs +// (i.e. roachpb.Spans inside of the requests) is reasonable. Enqueue will +// return an error if that footprint exceeds the Streamer's limitBytes. The +// exception is made only when a single request is enqueued in order to allow +// the caller to proceed when the key to lookup is arbitrarily large. As a rule +// of thumb though, the footprint of reqs should be on the order of MBs, and not +// tens of MBs. +// +// Currently, enqueuing new requests while there are still requests in progress +// from the previous invocation is prohibited. +// TODO(yuzefovich): lift this restriction and introduce the pipelining. +func (s *Streamer) Enqueue( + ctx context.Context, reqs []roachpb.RequestUnion, enqueueKeys []int, +) (retErr error) { + if !s.coordinatorStarted { + var coordinatorCtx context.Context + coordinatorCtx, s.coordinatorCtxCancel = context.WithCancel(ctx) + s.waitGroup.Add(1) + if err := s.stopper.RunAsyncTask(coordinatorCtx, "streamer-coordinator", s.coordinator.mainLoop); err != nil { + // The new goroutine wasn't spun up, so mainLoop won't get executed + // and we have to decrement the wait group ourselves. + s.waitGroup.Done() + return err + } + s.coordinatorStarted = true + } + + // TODO(yuzefovich): we might want to have more fine-grained lock + // acquisitions once pipelining is implemented. + s.mu.Lock() + locked := true + defer func() { + if retErr != nil { + if !locked { + s.mu.Lock() + locked = true + } + if s.mu.err == nil { + // Set the error so that mainLoop of the worker coordinator + // exits as soon as possible, without issuing any more requests. + s.mu.err = retErr + } + } + if locked { + s.mu.Unlock() + } + }() + + if enqueueKeys != nil && len(enqueueKeys) != len(reqs) { + return errors.AssertionFailedf("invalid enqueueKeys: len(reqs) = %d, len(enqueueKeys) = %d", len(reqs), len(enqueueKeys)) + } + s.enqueueKeys = enqueueKeys + + if s.mu.numEnqueuedRequests != s.mu.numCompleteRequests { + return errors.AssertionFailedf("Enqueue is called before the previous requests have been completed") + } + if len(s.mu.results) > 0 { + return errors.AssertionFailedf("Enqueue is called before the results of the previous requests have been retrieved") + } + + s.mu.numEnqueuedRequests = len(reqs) + s.mu.numCompleteRequests = 0 + + // The minimal key range encompassing all requests contained within. + // Local addressing has already been resolved. + rs, err := keys.Range(reqs) + if err != nil { + return err + } + + // Divide the given requests into single-range batches that are added to + // requestsToServe, and the worker coordinator will then pick those batches + // up to execute asynchronously. + var totalReqsMemUsage int64 + // TODO(yuzefovich): in InOrder mode we need to treat the head-of-the-line + // request differently. 
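As an illustration of the splitting performed below, here is a toy, self-contained sketch of how enqueued requests map onto per-range sub-batches and how `positions` records the original ordinals (the helper types, keys, and range boundaries are made up for illustration; the real code operates on roachpb.RequestUnion slices via kvcoord.Truncate):

```
package main

import "fmt"

// toyRange and toyReq stand in for a range descriptor and a KV request; only
// the keys matter for this illustration.
type toyRange struct{ start, end string }
type toyReq struct{ key string }

func main() {
	// Three enqueued requests; their ordinals (0, 1, 2) are what the code
	// below calls "positions".
	enqueued := []toyReq{{"d"}, {"a"}, {"c"}}
	ranges := []toyRange{{"a", "c"}, {"c", "e"}}

	for _, rng := range ranges {
		var reqs []toyReq
		var positions []int
		// "Truncate" the enqueued requests to the current range, remembering
		// which original ordinal each kept request came from.
		for pos, r := range enqueued {
			if r.key >= rng.start && r.key < rng.end {
				reqs = append(reqs, r)
				positions = append(positions, pos)
			}
		}
		fmt.Printf("range [%s,%s): reqs=%v positions=%v\n", rng.start, rng.end, reqs, positions)
	}
}
```

This prints `range [a,c): reqs=[{a}] positions=[1]` and `range [c,e): reqs=[{d} {c}] positions=[0 2]`. In OutOfOrder mode each sub-batch is then additionally sorted by key, with positions swapped in lockstep so the mapping stays intact (see the sort.Interface implementation on singleRangeBatch further below).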
+ seekKey := rs.Key + const scanDir = kvcoord.Ascending + ri := kvcoord.MakeRangeIterator(s.distSender) + ri.Seek(ctx, seekKey, scanDir) + if !ri.Valid() { + return ri.Error() + } + firstScanRequest := true + for ; ri.Valid(); ri.Seek(ctx, seekKey, scanDir) { + // Truncate the request span to the current range. + singleRangeSpan, err := rs.Intersect(ri.Token().Desc()) + if err != nil { + return err + } + // Find all requests that touch the current range. + singleRangeReqs, positions, err := kvcoord.Truncate(reqs, singleRangeSpan) + if err != nil { + return err + } + for _, pos := range positions { + if _, isScan := reqs[pos].GetInner().(*roachpb.ScanRequest); isScan { + if firstScanRequest { + // We have some ScanRequests, so we have to set up + // numRangesLeftPerScanRequest. + if cap(s.mu.numRangesLeftPerScanRequest) < len(reqs) { + s.mu.numRangesLeftPerScanRequest = make([]int, len(reqs)) + } else { + // We can reuse numRangesLeftPerScanRequest allocated on + // the previous call to Enqueue after we zero it out. + s.mu.numRangesLeftPerScanRequest = s.mu.numRangesLeftPerScanRequest[:len(reqs)] + for n := 0; n < len(s.mu.numRangesLeftPerScanRequest); { + n += copy(s.mu.numRangesLeftPerScanRequest[n:], zeroIntSlice) + } + } + } + s.mu.numRangesLeftPerScanRequest[pos]++ + firstScanRequest = false + } + } + + // TODO(yuzefovich): perform the de-duplication here. + //if !s.hints.UniqueRequests { + //} + + r := singleRangeBatch{ + reqs: singleRangeReqs, + positions: positions, + reqsReservedBytes: requestsMemUsage(singleRangeReqs), + } + totalReqsMemUsage += r.reqsReservedBytes + + if s.mode == OutOfOrder { + // Sort all single-range requests to be in the key order. + sort.Sort(&r) + } + + s.mu.requestsToServe = append(s.mu.requestsToServe, r) + + // Determine next seek key, taking potentially sparse requests into + // consideration. + // + // In next iteration, query next range. + // It's important that we use the EndKey of the current descriptor + // as opposed to the StartKey of the next one: if the former is stale, + // it's possible that the next range has since merged the subsequent + // one, and unless both descriptors are stale, the next descriptor's + // StartKey would move us to the beginning of the current range, + // resulting in a duplicate scan. + seekKey, err = kvcoord.Next(reqs, ri.Desc().EndKey) + rs.Key = seekKey + if err != nil { + return err + } + } + + // Release the Streamer's mutex so that there is no overlap with the + // budget's mutex - the budget's mutex needs to be acquired first in order + // to eliminate a potential deadlock. + s.mu.Unlock() + locked = false + + // Account for the memory used by all the requests. We allow the budget to + // go into debt iff a single request was enqueued. This is needed to support + // the case of arbitrarily large keys - the caller is expected to produce + // requests with such cases one at a time. + allowDebt := len(reqs) == 1 + if err = s.budget.consume(ctx, totalReqsMemUsage, allowDebt); err != nil { + return err + } + + // TODO(yuzefovich): it might be better to notify the coordinator once + // one singleRangeBatch object has been appended to s.mu.requestsToServe. + s.coordinator.mu.hasWork.Signal() + return nil +} + +// GetResults blocks until at least one result is available. If the operation +// mode is OutOfOrder, any result will do, and the caller is expected to examine +// Result.EnqueueKeysSatisfied to understand which request the result +// corresponds to. For InOrder, only head-of-line results will do. 
Zero-length +// result slice is returned once all enqueued requests have been responded to. +func (s *Streamer) GetResults(ctx context.Context) ([]Result, error) { + s.mu.Lock() + results := s.mu.results + err := s.mu.err + s.mu.results = nil + allComplete := s.mu.numCompleteRequests == s.mu.numEnqueuedRequests + // Non-blockingly clear the waitForResults channel in case we've just picked + // up some results. We do so while holding the mutex so that new results + // aren't appended. + select { + case <-s.waitForResults: + default: + } + s.mu.Unlock() + + if len(results) > 0 || allComplete || err != nil { + return results, err + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-s.waitForResults: + s.mu.Lock() + results = s.mu.results + err = s.mu.err + s.mu.results = nil + s.mu.Unlock() + return results, err + } +} + +// notifyGetResultsLocked non-blockingly sends a message on waitForResults +// channel. This method should be called only while holding the lock of s.mu so +// that other results couldn't be appended which would cause us to miss the +// notification about that. +func (s *Streamer) notifyGetResultsLocked() { + s.mu.AssertHeld() + select { + case s.waitForResults <- struct{}{}: + default: + } +} + +// setError sets the error on the Streamer if no error has been set previously +// and unblocks GetResults() if needed. +// +// The mutex of s must not be already held. +func (s *Streamer) setError(err error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.mu.err == nil { + s.mu.err = err + } + s.notifyGetResultsLocked() +} + +// Close cancels all in-flight operations and releases all of the resources of +// the Streamer. It blocks until all goroutines created by the Streamer exit. No +// other calls on s are allowed after this. +func (s *Streamer) Close() { + if s.coordinatorStarted { + s.coordinatorCtxCancel() + s.coordinator.mu.Lock() + s.coordinator.mu.done = true + // Unblock the coordinator in case it is waiting for more work. + s.coordinator.mu.hasWork.Signal() + s.coordinator.mu.Unlock() + } + s.waitGroup.Wait() + *s = Streamer{} +} + +// getNumRequestsInFlight returns the number of requests that are currently in +// flight. This method should be called without holding the lock of s. +func (s *Streamer) getNumRequestsInFlight() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.mu.numRequestsInFlight +} + +// adjustNumRequestsInFlight updates the number of requests that are currently +// in flight. This method should be called without holding the lock of s. +func (s *Streamer) adjustNumRequestsInFlight(delta int) { + s.mu.Lock() + defer s.mu.Unlock() + s.mu.numRequestsInFlight += delta +} + +// singleRangeBatch contains parts of the originally enqueued requests that have +// been truncated to be within a single range. All requests within the +// singleRangeBatch will be issued as a single BatchRequest. +type singleRangeBatch struct { + reqs []roachpb.RequestUnion + // positions is a 1-to-1 mapping with reqs to indicate which ordinal among + // the originally enqueued requests a particular reqs[i] corresponds to. In + // other words, if reqs[i] is (or a part of) enqueuedReqs[j], then + // positions[i] = j. + // TODO(yuzefovich): this might need to be [][]int when non-unique requests + // are supported. + positions []int + // reqsReservedBytes tracks the memory reservation against the budget for + // the memory usage of reqs. 
+ reqsReservedBytes int64 +} + +var _ sort.Interface = &singleRangeBatch{} + +func (r *singleRangeBatch) Len() int { + return len(r.reqs) +} + +func (r *singleRangeBatch) Swap(i, j int) { + r.reqs[i], r.reqs[j] = r.reqs[j], r.reqs[i] + r.positions[i], r.positions[j] = r.positions[j], r.positions[i] +} + +// Less returns true if r.reqs[i]'s key comes before r.reqs[j]'s key. +func (r *singleRangeBatch) Less(i, j int) bool { + // TODO(yuzefovich): figure out whether it's worth extracting the keys when + // constructing singleRangeBatch object. + return r.reqs[i].GetInner().Header().Key.Compare(r.reqs[j].GetInner().Header().Key) < 0 +} + +type workerCoordinator struct { + s *Streamer + txn *kv.Txn + lockWaitPolicy lock.WaitPolicy + + asyncSem *quotapool.IntPool + + // For request and response admission control. + requestAdmissionHeader roachpb.AdmissionHeader + responseAdmissionQ *admission.WorkQueue + + mu struct { + syncutil.Mutex + hasWork *sync.Cond + // done is set to true once the Streamer is closed meaning the worker + // coordinator must exit. + done bool + } +} + +// mainLoop runs throughout the lifetime of the Streamer (from the first Enqueue +// call until Cancel) and routes the single-range batches for asynchronous +// execution. This function is dividing up the Streamer's budget for each of +// those batches and won't start executing the batches if the available budget +// is insufficient. The function exits when an error is encountered by one of +// the asynchronous requests. +func (w *workerCoordinator) mainLoop(ctx context.Context) { + defer w.s.waitGroup.Done() + for { + // Get next requests to serve. + requestsToServe, avgResponseSize, shouldExit := w.getRequests() + if shouldExit { + return + } + if len(requestsToServe) == 0 { + // If the Streamer isn't closed yet, block until there are enqueued + // requests. + w.mu.Lock() + if !w.mu.done { + w.mu.hasWork.Wait() + } + w.mu.Unlock() + if ctx.Err() != nil { + w.s.setError(ctx.Err()) + return + } + continue + } + + // Now wait until there is enough budget to at least receive one full + // response (but only if there are requests in flight - if there are + // none, then we might have a degenerate case when a single row is + // expected to exceed the budget). + // TODO(yuzefovich): consider using a multiple of avgResponseSize here. + for w.s.getNumRequestsInFlight() > 0 && w.s.budget.available() < avgResponseSize { + select { + case <-w.s.budget.waitCh: + case <-ctx.Done(): + w.s.setError(ctx.Err()) + return + } + } + + err := w.issueRequestsForAsyncProcessing(ctx, requestsToServe, avgResponseSize) + if err != nil { + w.s.setError(err) + return + } + } +} + +// getRequests returns all currently enqueued requests to be served. +// +// A boolean that indicates whether the coordinator should exit is returned. +func (w *workerCoordinator) getRequests() ( + requestsToServe []singleRangeBatch, + avgResponseSize int64, + shouldExit bool, +) { + w.s.mu.Lock() + defer w.s.mu.Unlock() + requestsToServe = w.s.mu.requestsToServe + avgResponseSize = w.s.mu.avgResponseEstimator.getAvgResponseSize() + shouldExit = w.s.mu.err != nil + return requestsToServe, avgResponseSize, shouldExit +} + +// issueRequestsForAsyncProcessing iterates over the given requests and issues +// them to be served asynchronously while there is enough budget available to +// receive the responses. 
Once the budget is exhausted, no new requests are +// issued, the only exception is made for the case when there are no other +// requests in flight, and in that scenario, a single request will be issued. +// +// It is assumed that requestsToServe is a prefix of w.s.mu.requestsToServe +// (i.e. it is possible that some other requests have been appended to +// w.s.mu.requestsToServe after requestsToServe have been grabbed). All issued +// requests are removed from w.s.mu.requestToServe. +func (w *workerCoordinator) issueRequestsForAsyncProcessing( + ctx context.Context, requestsToServe []singleRangeBatch, avgResponseSize int64, +) error { + var numRequestsIssued int + defer func() { + w.s.mu.Lock() + // We can just slice here since we only append to requestToServe at + // the moment. + w.s.mu.requestsToServe = w.s.mu.requestsToServe[numRequestsIssued:] + w.s.mu.Unlock() + }() + w.s.budget.mu.Lock() + defer w.s.budget.mu.Unlock() + + headOfLine := w.s.getNumRequestsInFlight() == 0 + var budgetIsExhausted bool + for numRequestsIssued < len(requestsToServe) && !budgetIsExhausted { + availableBudget := w.s.budget.limitBytes - w.s.budget.mu.acc.Used() + if availableBudget < avgResponseSize { + if !headOfLine { + // We don't have enough budget available to serve this request, + // and there are other requests in flight, so we'll wait for + // some of them to finish. + break + } + budgetIsExhausted = true + if availableBudget < 1 { + // The budget is already in debt, and we have no requests in + // flight. This occurs when we have very large roachpb.Span in + // the request. In such a case, we still want to make progress + // by giving the smallest TargetBytes possible while asking the + // KV layer to not return an empty response. + availableBudget = 1 + } + } + singleRangeReqs := requestsToServe[numRequestsIssued] + // Calculate what TargetBytes limit to use for the BatchRequest that + // will be issued based on singleRangeReqs. We use the estimate to guess + // how much memory the response will need, and we reserve this + // estimation up front. + // + // Note that TargetBytes will be a strict limit on the response size + // (except in a degenerate case for head-of-the-line request that will + // get a very large single row in response which will exceed this + // limit). + targetBytes := int64(len(singleRangeReqs.reqs)) * avgResponseSize + if targetBytes > availableBudget { + // The estimate tells us that we don't have enough budget to receive + // the full response; however, in order to utilize the available + // budget fully, we can still issue this request with the truncated + // TargetBytes value hoping to receive a partial response. + targetBytes = availableBudget + } + if err := w.s.budget.consumeLocked(ctx, targetBytes, headOfLine /* allowDebt */); err != nil { + // This error cannot be because of the budget going into debt. If + // headOfLine is true, then we're allowing debt; otherwise, we have + // truncated targetBytes above to not exceed availableBudget, and + // we're holding the budget's mutex. Thus, the error indicates that + // the root memory pool has been exhausted. + if !headOfLine { + // There are some requests in flight, so we'll let them finish. + // + // This is opportunistic behavior where we're hoping that once + // other requests are fully processed (i.e. the corresponding + // results are Release()'d), we'll be able to make progress on + // this request too, without exceeding the root memory pool. 
+ // + // We're not really concerned about pushing the node towards the + // OOM situation because we're still staying within the root + // memory pool limit (which should have some safety gap with the + // available RAM). Furthermore, if other queries are consuming + // all of the root memory pool limit, then the head-of-the-line + // request will notice it and will exit accordingly. + break + } + // We don't have any requests in flight, so we'll exit to be safe + // (in order not to OOM the node). Most likely this occurs when + // there are concurrent memory-intensive queries which this Streamer + // has no control over. + // + // We could have issued this head-of-the-line request with lower + // targetBytes value (unless it is already 1), but the fact that the + // root memory pool is exhausted indicates that the node might be + // overloaded already, so it seems better to not ask it to receive + // any more responses at the moment. + return err + } + w.performRequestAsync(ctx, singleRangeReqs, targetBytes, headOfLine) + numRequestsIssued++ + headOfLine = false + } + return nil +} + +// addRequest adds a single-range batch to be processed later. +func (w *workerCoordinator) addRequest(req singleRangeBatch) { + w.s.mu.Lock() + defer w.s.mu.Unlock() + w.s.mu.requestsToServe = append(w.s.mu.requestsToServe, req) + w.mu.hasWork.Signal() +} + +func (w *workerCoordinator) asyncRequestCleanup() { + w.s.adjustNumRequestsInFlight(-1 /* delta */) + w.s.waitGroup.Done() +} + +// performRequestAsync dispatches the given single-range batch for evaluation +// asynchronously. If the batch cannot be evaluated fully (due to exhausting its +// memory limitBytes), the "resume" single-range batch will be added into +// requestsToServe, and mainLoop will pick that up to process later. +// +// targetBytes specifies the memory budget that this single-range batch should +// be issued with. targetBytes bytes have already been consumed from the budget, +// and this amount of memory is owned by the goroutine that is spun up to +// perform the request. Once the response is received, performRequestAsync +// reconciles the budget so that the actual footprint of the response is +// consumed. Each Result produced based on that response will track a part of +// the memory reservation (according to the Result's footprint) that will be +// returned back to the budget once Result.Release() is called. +// +// headOfLine indicates whether this request is the current head of the line. +// Head-of-the-line requests are treated specially in a sense that they are +// allowed to put the budget into debt. The caller is responsible for ensuring +// that there is at most one asynchronous request with headOfLine=true at all +// times. +func (w *workerCoordinator) performRequestAsync( + ctx context.Context, req singleRangeBatch, targetBytes int64, headOfLine bool, +) { + w.s.waitGroup.Add(1) + w.s.adjustNumRequestsInFlight(1 /* delta */) + if err := w.s.stopper.RunAsyncTaskEx( + ctx, + stop.TaskOpts{ + TaskName: "streamer-lookup-async", + Sem: w.asyncSem, + WaitForSem: true, + }, + func(ctx context.Context) { + defer w.asyncRequestCleanup() + var ba roachpb.BatchRequest + ba.Header.WaitPolicy = w.lockWaitPolicy + ba.Header.TargetBytes = targetBytes + ba.Header.TargetBytesAllowEmpty = !headOfLine + // TODO(yuzefovich): consider setting MaxSpanRequestKeys whenever + // applicable (#67885). 
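The TargetBytes value set on the header above comes from the sizing logic in issueRequestsForAsyncProcessing. A simplified, self-contained sketch of that computation follows (the function name and the concrete sizes are made up for illustration):

```
package main

import "fmt"

// targetBytesForBatch is a simplified sketch of how the worker coordinator
// sizes TargetBytes for one single-range batch: the number of requests times
// the running average response size, truncated to the budget that is still
// available. A head-of-the-line batch is issued with at least 1 byte so the
// Streamer keeps making progress even when the budget is already in debt.
// (The real code additionally skips non-head-of-the-line batches entirely
// when less than avgResponseSize of budget remains.)
func targetBytesForBatch(numReqs int, avgResponseSize, availableBudget int64, headOfLine bool) int64 {
	if headOfLine && availableBudget < 1 {
		availableBudget = 1
	}
	targetBytes := int64(numReqs) * avgResponseSize
	if targetBytes > availableBudget {
		targetBytes = availableBudget
	}
	return targetBytes
}

func main() {
	// 10 requests estimated at ~1KiB each, but only 4KiB of budget left:
	// ask KV for a partial response and resume the rest later.
	fmt.Println(targetBytesForBatch(10, 1<<10, 4<<10, false)) // 4096
	// Budget exhausted, but this is the head of the line: issue the batch
	// with TargetBytes=1 (and TargetBytesAllowEmpty=false) anyway.
	fmt.Println(targetBytesForBatch(10, 1<<10, 0, true)) // 1
}
```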
+ ba.AdmissionHeader = w.requestAdmissionHeader + // We always have some memory reserved against the memory account, + // regardless of the value of headOfLine. + ba.AdmissionHeader.NoMemoryReservedAtSource = false + ba.Requests = req.reqs + + // TODO(yuzefovich): in Enqueue we split all requests into + // single-range batches, so ideally ba touches a single range in + // which case we hit the fast path in the DistSender. However, if + // the range boundaries have changed after we performed the split + // (or we had stale range cache at the time of the split), the + // DistSender will transparently re-split ba into several + // sub-batches that will be executed sequentially because of the + // presence of limits. We could, instead, ask the DistSender to not + // perform that re-splitting and return an error, then we'll rely on + // the updated range cache to perform re-splitting ourselves. This + // should offer some performance improvements since we'd eliminate + // unnecessary blocking (due to sequential evaluation of sub-batches + // by the DistSender). For the initial implementation it doesn't + // seem important though. + br, err := w.txn.Send(ctx, ba) + if err != nil { + // TODO(yuzefovich): if err is + // ReadWithinUncertaintyIntervalError and there is only a single + // Streamer in a single local flow, attempt to transparently + // refresh. + w.s.setError(err.GoError()) + return + } + + var resumeReq singleRangeBatch + // We will reuse the slices for the resume spans, if any. + resumeReq.reqs = req.reqs[:0] + resumeReq.positions = req.positions[:0] + var results []Result + var numCompleteGetResponses int + // memoryFootprintBytes tracks the total memory footprint of + // non-empty responses. This will be equal to the sum of memory + // tokens created for all Results. + var memoryFootprintBytes int64 + var hasNonEmptyScanResponse bool + for i, resp := range br.Responses { + enqueueKey := req.positions[i] + if w.s.enqueueKeys != nil { + enqueueKey = w.s.enqueueKeys[req.positions[i]] + } + reply := resp.GetInner() + origReq := req.reqs[i] + // Unset the original request so that we lose the reference to + // the span. + req.reqs[i] = roachpb.RequestUnion{} + switch origRequest := origReq.GetInner().(type) { + case *roachpb.GetRequest: + get := reply.(*roachpb.GetResponse) + if get.ResumeSpan != nil { + // This Get wasn't completed - update the original + // request according to the ResumeSpan and include it + // into the batch again. + origRequest.SetSpan(*get.ResumeSpan) + resumeReq.reqs = append(resumeReq.reqs, origReq) + resumeReq.positions = append(resumeReq.positions, req.positions[i]) + } else { + // This Get was completed. + toRelease := int64(get.Size()) + result := Result{ + GetResp: get, + // This currently only works because all requests + // are unique. + EnqueueKeysSatisfied: []int{enqueueKey}, + position: req.positions[i], + } + result.memoryTok.budget = w.s.budget + result.memoryTok.toRelease = toRelease + memoryFootprintBytes += toRelease + results = append(results, result) + numCompleteGetResponses++ + } + + case *roachpb.ScanRequest: + scan := reply.(*roachpb.ScanResponse) + resumeSpan := scan.ResumeSpan + if len(scan.Rows) > 0 || len(scan.BatchResponses) > 0 { + toRelease := int64(scan.Size()) + result := Result{ + // This currently only works because all requests + // are unique. 
+ EnqueueKeysSatisfied: []int{enqueueKey}, + position: req.positions[i], + } + result.memoryTok.budget = w.s.budget + result.memoryTok.toRelease = toRelease + result.ScanResp.ScanResponse = scan + // Complete field will be set below. + memoryFootprintBytes += toRelease + results = append(results, result) + hasNonEmptyScanResponse = true + } + if resumeSpan != nil { + // This Scan wasn't completed - update the original + // request according to the resumeSpan and include it + // into the batch again. + origRequest.SetSpan(*resumeSpan) + resumeReq.reqs = append(resumeReq.reqs, origReq) + resumeReq.positions = append(resumeReq.positions, req.positions[i]) + } + } + } + + // Now adjust the budget based on the actual memory footprint of + // non-empty responses as well as resume spans, if any. + respOverestimate := targetBytes - memoryFootprintBytes + var reqsMemUsage int64 + if len(resumeReq.reqs) > 0 { + reqsMemUsage = requestsMemUsage(resumeReq.reqs) + } + reqOveraccounted := req.reqsReservedBytes - reqsMemUsage + overaccountedTotal := respOverestimate + reqOveraccounted + if overaccountedTotal >= 0 { + w.s.budget.release(ctx, overaccountedTotal) + } else { + // There is an under-accounting at the moment, so we have to + // increase the memory reservation. + // + // This under-accounting can occur in a couple of edge cases: + // 1) the estimate of the response sizes is pretty good (i.e. + // respOverestimate is around 0), but we received many partial + // responses with ResumeSpans that take up much more space than + // the original requests; + // 2) we have a single large row in the response. In this case + // headOfLine must be true (targetBytes might be 1 or higher, + // but not enough for that large row). + toConsume := -overaccountedTotal + if err := w.s.budget.consume(ctx, toConsume, headOfLine /* allowDebt */); err != nil { + w.s.budget.release(ctx, targetBytes) + if !headOfLine { + // Since this is not the head of the line, we'll just + // discard the result and add the request back to be + // served. + // + // This is opportunistic behavior where we're hoping + // that once other requests are fully processed (i.e. + // the corresponding results are Release()'d), we'll be + // able to make progress on this request too. + // TODO(yuzefovich): consider updating the + // avgResponseSize and/or storing the information about + // the returned bytes size in req. + w.addRequest(req) + return + } + // The error indicates that the root memory pool has been + // exhausted, so we'll exit to be safe (in order not to OOM + // the node). + // TODO(yuzefovich): if the response contains multiple rows, + // consider adding the request back to be served with a note + // to issue it with smaller targetBytes. + w.s.setError(err) + return + } + } + // Update the resume request accordingly. + resumeReq.reqsReservedBytes = reqsMemUsage + + // Do admission control after we've finalized the memory accounting. + if br != nil && w.responseAdmissionQ != nil { + responseAdmission := admission.WorkInfo{ + TenantID: roachpb.SystemTenantID, + Priority: admission.WorkPriority(w.requestAdmissionHeader.Priority), + CreateTime: w.requestAdmissionHeader.CreateTime, + } + if _, err := w.responseAdmissionQ.Admit(ctx, responseAdmission); err != nil { + w.s.setError(err) + return + } + } + + // If we have any results, finalize them. 
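The budget reconciliation performed above (releasing any over-estimate back to the budget, or consuming the shortfall when the reservation was too small) can be summarized with a small self-contained sketch; the helper name and the concrete sizes are made up for illustration:

```
package main

import "fmt"

// reconcile is a simplified sketch of how the budget is settled once a
// BatchResponse arrives: the up-front reservation (targetBytes for the
// response plus reqsReservedBytes for the request slice) is compared against
// what is actually retained (the footprint of the non-empty responses plus
// the footprint of any resume requests). A positive difference is released
// back to the budget; a negative one must be consumed in addition.
func reconcile(targetBytes, responseFootprint, reqsReservedBytes, resumeReqsFootprint int64) (release, consume int64) {
	overaccounted := (targetBytes - responseFootprint) + (reqsReservedBytes - resumeReqsFootprint)
	if overaccounted >= 0 {
		return overaccounted, 0
	}
	return 0, -overaccounted
}

func main() {
	// Reserved 8KiB for the response but only 5KiB came back, and the 1KiB
	// of requests completed fully (no resume spans): release 4KiB.
	fmt.Println(reconcile(8<<10, 5<<10, 1<<10, 0))
	// A head-of-the-line batch reserved 1 byte but received a ~2MiB row:
	// consume the difference (possibly putting the budget into debt).
	fmt.Println(reconcile(1, 2<<20, 1<<10, 1<<10))
}
```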
+ if len(results) > 0 { + w.finalizeSingleRangeResults( + results, memoryFootprintBytes, hasNonEmptyScanResponse, + numCompleteGetResponses, + ) + } + + // If we have any incomplete requests, add them back into the work + // pool. + if len(resumeReq.reqs) > 0 { + w.addRequest(resumeReq) + } + }); err != nil { + // The new goroutine for the request wasn't spun up, so we have to + // perform the cleanup of this request ourselves. + w.asyncRequestCleanup() + w.s.setError(err) + } +} + +// finalizeSingleRangeResults "finalizes" the results of evaluation of a +// singleRangeBatch. By "finalization" we mean setting Complete field of +// ScanResp to correct value for all scan responses, updating the estimate of an +// average response size, and telling the Streamer about these results. +// +// This method assumes that results has length greater than zero. +func (w *workerCoordinator) finalizeSingleRangeResults( + results []Result, + actualMemoryReservation int64, + hasNonEmptyScanResponse bool, + numCompleteGetResponses int, +) { + w.s.mu.Lock() + defer w.s.mu.Unlock() + + numCompleteResponses := numCompleteGetResponses + // If we have non-empty scan response, it might be complete. This will be + // the case when a scan response doesn't have a resume span and there are no + // other scan requests in flight (involving other ranges) that are part of + // the same original ScanRequest. + // + // We need to do this check as well as adding the results to be returned to + // the client as an atomic operation so that Complete is set to true only on + // the last partial scan response. + if hasNonEmptyScanResponse { + for _, r := range results { + if r.ScanResp.ScanResponse != nil { + if r.ScanResp.ResumeSpan == nil { + // The scan within the range is complete. + w.s.mu.numRangesLeftPerScanRequest[r.position]-- + if w.s.mu.numRangesLeftPerScanRequest[r.position] == 0 { + // The scan across all ranges is now complete too. + r.ScanResp.Complete = true + numCompleteResponses++ + } + } else { + // Unset the ResumeSpan on the result in order to not + // confuse the user of the Streamer. Non-nil resume span was + // already included into resumeReq populated in + // performRequestAsync. + r.ScanResp.ResumeSpan = nil + } + } + } + } + + // Update the average response size based on this batch. + // TODO(yuzefovich): some of the responses might be partial, yet the + // estimator doesn't distinguish the footprint of the full response vs the + // partial one. Think more about this. + w.s.mu.avgResponseEstimator.update(actualMemoryReservation, int64(len(results))) + w.s.mu.numCompleteRequests += numCompleteResponses + // Store the results and non-blockingly notify the Streamer about them. + w.s.mu.results = append(w.s.mu.results, results...) + w.s.notifyGetResultsLocked() +} + +var zeroIntSlice []int + +func init() { + zeroIntSlice = make([]int, 1<<10) +} + +const requestUnionSliceOverhead = int64(unsafe.Sizeof([]roachpb.RequestUnion{})) + +func requestsMemUsage(reqs []roachpb.RequestUnion) int64 { + memUsage := requestUnionSliceOverhead + // Slice up to the capacity to account for everything. + for _, r := range reqs[:cap(reqs)] { + memUsage += int64(r.Size()) + } + return memUsage +} diff --git a/pkg/kv/kvclient/kvstreamer/streamer_test.go b/pkg/kv/kvclient/kvstreamer/streamer_test.go new file mode 100644 index 000000000000..2183cbbd0f42 --- /dev/null +++ b/pkg/kv/kvclient/kvstreamer/streamer_test.go @@ -0,0 +1,271 @@ +// Copyright 2022 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvstreamer + +import ( + "context" + "math" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/cockroach/pkg/util/randutil" + "github.com/stretchr/testify/require" +) + +func getStreamer( + ctx context.Context, s serverutils.TestServerInterface, limitBytes int64, acc *mon.BoundAccount, +) *Streamer { + return NewStreamer( + s.DistSenderI().(*kvcoord.DistSender), + s.Stopper(), + kv.NewTxn(ctx, s.DB(), s.NodeID()), + cluster.MakeTestingClusterSettings(), + lock.WaitPolicy(0), + limitBytes, + acc, + ) +} + +// TestStreamerLimitations verifies that the streamer panics or encounters +// errors in currently unsupported or invalid scenarios. +func TestStreamerLimitations(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + + getStreamer := func() *Streamer { + return getStreamer(ctx, s, math.MaxInt64, nil /* acc */) + } + + t.Run("InOrder mode unsupported", func(t *testing.T) { + require.Panics(t, func() { + streamer := getStreamer() + streamer.Init(InOrder, Hints{UniqueRequests: true}) + }) + }) + + t.Run("non-unique requests unsupported", func(t *testing.T) { + require.Panics(t, func() { + streamer := getStreamer() + streamer.Init(OutOfOrder, Hints{UniqueRequests: false}) + }) + }) + + t.Run("invalid enqueueKeys", func(t *testing.T) { + streamer := getStreamer() + defer streamer.Close() + streamer.Init(OutOfOrder, Hints{UniqueRequests: true}) + // Use a single request but two keys which is invalid. + reqs := []roachpb.RequestUnion{{Value: &roachpb.RequestUnion_Get{}}} + enqueueKeys := []int{0, 1} + require.Error(t, streamer.Enqueue(ctx, reqs, enqueueKeys)) + }) + + t.Run("pipelining unsupported", func(t *testing.T) { + streamer := getStreamer() + defer streamer.Close() + streamer.Init(OutOfOrder, Hints{UniqueRequests: true}) + get := roachpb.NewGet(roachpb.Key("key"), false /* forUpdate */) + reqs := []roachpb.RequestUnion{{ + Value: &roachpb.RequestUnion_Get{ + Get: get.(*roachpb.GetRequest), + }, + }} + require.NoError(t, streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */)) + // It is invalid to enqueue more requests before the previous have been + // responded to. + require.Error(t, streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */)) + }) +} + +// TestLargeKeys verifies that the Streamer successfully completes the queries +// when the keys to lookup are large (i.e. the enqueued requests themselves have +// large memory footprint). 
+func TestLargeKeys(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + skip.UnderStress(t, "the test inserts large blobs, and the machine can be overloaded when under stress") + + rng, _ := randutil.NewTestRand() + s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + ctx := context.Background() + defer s.Stopper().Stop(ctx) + + // Lower the distsql_workmem limit so that we can operate with smaller + // blobs. Note that the joinReader in the row-by-row engine will override + // the limit if it is lower than 8MiB, so we cannot go lower than that here. + _, err := db.Exec("SET distsql_workmem='8MiB'") + require.NoError(t, err) + // In both engines, the index joiner buffers input rows up to 4MiB in size, + // so we have a couple of interesting options for the blob size: + // - 3000000 is interesting because it doesn't exceed the buffer size, yet + // two rows with such blobs do exceed it. The index joiners are expected to + // to process each row on its own. + // - 5000000 is interesting because a single row already exceeds the buffer + // size. + for _, blobSize := range []int{3000000, 5000000} { + // onlyLarge determines whether only large blobs are inserted or a mix + // of large and small blobs. + for _, onlyLarge := range []bool{false, true} { + _, err = db.Exec("DROP TABLE IF EXISTS foo") + require.NoError(t, err) + // We set up such a table that contains two large columns, one of them + // being the primary key. The idea is that the query below will first + // read from the secondary index which would include only the PK blob, + // and that will be used to construct index join lookups (i.e. the PK + // blobs will be the enqueued requests for the Streamer) whereas the + // other blob will be part of the response. + _, err = db.Exec("CREATE TABLE foo (pk_blob STRING PRIMARY KEY, attribute INT, blob TEXT, INDEX(attribute))") + require.NoError(t, err) + + // Insert a handful of rows. + numRows := rng.Intn(3) + 3 + for i := 0; i < numRows; i++ { + letter := string(byte('a') + byte(i)) + valueSize := blobSize + if !onlyLarge && rng.Float64() < 0.5 { + // If we're using a mix of large and small values, with 50% + // use a small value now. + valueSize = rng.Intn(10) + 1 + } + _, err = db.Exec("INSERT INTO foo SELECT repeat($1, $2), 1, repeat($1, $2)", letter, valueSize) + require.NoError(t, err) + } + + // Perform an index join so that the Streamer API is used. + query := "SELECT * FROM foo@foo_attribute_idx WHERE attribute=1" + testutils.RunTrueAndFalse(t, "vectorize", func(t *testing.T, vectorize bool) { + vectorizeMode := "off" + if vectorize { + vectorizeMode = "on" + } + _, err = db.Exec("SET vectorize = " + vectorizeMode) + require.NoError(t, err) + _, err = db.Exec(query) + require.NoError(t, err) + }) + } + } +} + +// TestStreamerBudgetErrorInEnqueue verifies the behavior of the Streamer in +// Enqueue when its limit and/or root pool limit are exceeded. Additional tests +// around the memory limit errors (when the responses exceed the limit) can be +// found in TestMemoryLimit in pkg/sql. +func TestStreamerBudgetErrorInEnqueue(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + + // Create a dummy table for which we know the encoding of valid keys. 
+ _, err := db.Exec("CREATE TABLE foo (pk_blob STRING PRIMARY KEY, attribute INT, blob TEXT, INDEX(attribute))") + require.NoError(t, err) + + // makeGetRequest returns a valid GetRequest that wants to lookup a key with + // value 'a' repeated keySize number of times in the primary index of table + // foo. + makeGetRequest := func(keySize int) roachpb.RequestUnion { + var res roachpb.RequestUnion + var get roachpb.GetRequest + var union roachpb.RequestUnion_Get + key := make([]byte, keySize+6) + key[0] = 190 + key[1] = 137 + key[2] = 18 + for i := 0; i < keySize; i++ { + key[i+3] = 97 + } + key[keySize+3] = 0 + key[keySize+4] = 1 + key[keySize+5] = 136 + get.Key = key + union.Get = &get + res.Value = &union + return res + } + + // Imitate a root SQL memory monitor with 1MiB size. + const rootPoolSize = 1 << 20 /* 1MiB */ + rootMemMonitor := mon.NewMonitor( + "root", /* name */ + mon.MemoryResource, + nil, /* curCount */ + nil, /* maxHist */ + -1, /* increment */ + math.MaxInt64, /* noteworthy */ + cluster.MakeTestingClusterSettings(), + ) + rootMemMonitor.Start(ctx, nil /* pool */, mon.MakeStandaloneBudget(rootPoolSize)) + defer rootMemMonitor.Stop(ctx) + + acc := rootMemMonitor.MakeBoundAccount() + defer acc.Close(ctx) + + getStreamer := func(limitBytes int64) *Streamer { + acc.Clear(ctx) + s := getStreamer(ctx, s, limitBytes, &acc) + s.Init(OutOfOrder, Hints{UniqueRequests: true}) + return s + } + + t.Run("single key exceeds limit", func(t *testing.T) { + const limitBytes = 10 + streamer := getStreamer(limitBytes) + defer streamer.Close() + + // A single request that exceeds the limit should be allowed. + reqs := make([]roachpb.RequestUnion, 1) + reqs[0] = makeGetRequest(limitBytes + 1) + require.NoError(t, streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */)) + }) + + t.Run("single key exceeds root pool size", func(t *testing.T) { + const limitBytes = 10 + streamer := getStreamer(limitBytes) + defer streamer.Close() + + // A single request that exceeds the limit as well as the root SQL pool + // should be denied. + reqs := make([]roachpb.RequestUnion, 1) + reqs[0] = makeGetRequest(rootPoolSize + 1) + require.Error(t, streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */)) + }) + + t.Run("multiple keys exceed limit", func(t *testing.T) { + const limitBytes = 10 + streamer := getStreamer(limitBytes) + defer streamer.Close() + + // Create two requests which exceed the limit when combined. + reqs := make([]roachpb.RequestUnion, 2) + reqs[0] = makeGetRequest(limitBytes/2 + 1) + reqs[1] = makeGetRequest(limitBytes/2 + 1) + require.Error(t, streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */)) + }) +} diff --git a/pkg/kv/kvclient/rangecache/range_cache.go b/pkg/kv/kvclient/rangecache/range_cache.go index 06585f23689f..29e068bcda1f 100644 --- a/pkg/kv/kvclient/rangecache/range_cache.go +++ b/pkg/kv/kvclient/rangecache/range_cache.go @@ -1112,7 +1112,7 @@ type CacheEntry struct { } func (e CacheEntry) String() string { - return fmt.Sprintf("desc:%s, lease:%s", e.Desc(), &e.lease) + return fmt.Sprintf("desc:%s, lease:%s", e.Desc(), e.lease) } // Desc returns the cached descriptor. 
Note that, besides being possibly stale, diff --git a/pkg/kv/kvserver/BUILD.bazel b/pkg/kv/kvserver/BUILD.bazel index 998378268460..cb174368921c 100644 --- a/pkg/kv/kvserver/BUILD.bazel +++ b/pkg/kv/kvserver/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "replica_application_state_machine.go", "replica_backpressure.go", "replica_batch_updates.go", + "replica_circuit_breaker.go", "replica_closedts.go", "replica_command.go", "replica_consistency.go", @@ -162,6 +163,7 @@ go_library( "//pkg/util", "//pkg/util/admission", "//pkg/util/bufalloc", + "//pkg/util/circuit", "//pkg/util/contextutil", "//pkg/util/ctxgroup", "//pkg/util/encoding", @@ -221,6 +223,7 @@ go_test( "client_rangefeed_test.go", "client_relocate_range_test.go", "client_replica_backpressure_test.go", + "client_replica_circuit_breaker_test.go", "client_replica_gc_test.go", "client_replica_test.go", "client_spanconfigs_test.go", @@ -252,6 +255,7 @@ go_test( "replica_application_cmd_buf_test.go", "replica_application_state_machine_test.go", "replica_batch_updates_test.go", + "replica_circuit_breaker_test.go", "replica_closedts_internal_test.go", "replica_closedts_test.go", "replica_command_test.go", @@ -356,7 +360,7 @@ go_test( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlutil", @@ -364,6 +368,7 @@ go_test( "//pkg/storage/enginepb", "//pkg/storage/fs", "//pkg/testutils", + "//pkg/testutils/echotest", "//pkg/testutils/gossiputil", "//pkg/testutils/kvclientutils", "//pkg/testutils/serverutils", @@ -374,6 +379,7 @@ go_test( "//pkg/ts/tspb", "//pkg/util", "//pkg/util/caller", + "//pkg/util/circuit", "//pkg/util/contextutil", "//pkg/util/ctxgroup", "//pkg/util/encoding", diff --git a/pkg/kv/kvserver/batcheval/cmd_clear_range.go b/pkg/kv/kvserver/batcheval/cmd_clear_range.go index 8891212aca1a..34bc0818815a 100644 --- a/pkg/kv/kvserver/batcheval/cmd_clear_range.go +++ b/pkg/kv/kvserver/batcheval/cmd_clear_range.go @@ -97,8 +97,15 @@ func ClearRange( // If the total size of data to be cleared is less than // clearRangeBytesThreshold, clear the individual values with an iterator, // instead of using a range tombstone (inefficient for small ranges). - if total := statsDelta.Total(); total < ClearRangeBytesThreshold { - log.VEventf(ctx, 2, "delta=%d < threshold=%d; using non-range clear", total, ClearRangeBytesThreshold) + // + // However, don't do this if the stats contain estimates -- this can only + // happen when we're clearing an entire range and we're using the existing + // range stats. We've seen cases where these estimates are wildly inaccurate + // (even negative), and it's better to drop an unnecessary range tombstone + // than to submit a huge write batch that'll get rejected by Raft. + if statsDelta.ContainsEstimates == 0 && statsDelta.Total() < ClearRangeBytesThreshold { + log.VEventf(ctx, 2, "delta=%d < threshold=%d; using non-range clear", + statsDelta.Total(), ClearRangeBytesThreshold) iter := readWriter.NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{ LowerBound: from, UpperBound: to, @@ -154,7 +161,7 @@ func computeStatsDelta( } // If we took the fast path but race is enabled, assert stats were correctly computed. 
if fast { - delta.ContainsEstimates = computed.ContainsEstimates + computed.ContainsEstimates = delta.ContainsEstimates // retained for tests under race if !delta.Equal(computed) { log.Fatalf(ctx, "fast-path MVCCStats computation gave wrong result: diff(fast, computed) = %s", pretty.Diff(delta, computed)) diff --git a/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go b/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go index 105061c936ff..5b004e2c8a62 100644 --- a/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_clear_range_test.go @@ -65,6 +65,7 @@ func TestCmdClearRangeBytesThreshold(t *testing.T) { overFull := ClearRangeBytesThreshold/len(valueStr) + 1 tests := []struct { keyCount int + estimatedStats bool expClearIterCount int expClearRangeCount int }{ @@ -85,6 +86,13 @@ func TestCmdClearRangeBytesThreshold(t *testing.T) { expClearIterCount: 0, expClearRangeCount: 1, }, + // Estimated stats always use ClearRange. + { + keyCount: 1, + estimatedStats: true, + expClearIterCount: 0, + expClearRangeCount: 1, + }, } for _, test := range tests { @@ -100,6 +108,9 @@ func TestCmdClearRangeBytesThreshold(t *testing.T) { t.Fatal(err) } } + if test.estimatedStats { + stats.ContainsEstimates++ + } batch := &wrappedBatch{Batch: eng.NewBatch()} defer batch.Close() @@ -126,10 +137,12 @@ func TestCmdClearRangeBytesThreshold(t *testing.T) { t.Fatal(err) } - // Verify cArgs.Stats is equal to the stats we wrote. + // Verify cArgs.Stats is equal to the stats we wrote, ignoring some values. newStats := stats - newStats.SysBytes, newStats.SysCount, newStats.AbortSpanBytes = 0, 0, 0 // ignore these values - cArgs.Stats.SysBytes, cArgs.Stats.SysCount, cArgs.Stats.AbortSpanBytes = 0, 0, 0 // these too, as GC threshold is updated + newStats.ContainsEstimates, cArgs.Stats.ContainsEstimates = 0, 0 + newStats.SysBytes, cArgs.Stats.SysBytes = 0, 0 + newStats.SysCount, cArgs.Stats.SysCount = 0, 0 + newStats.AbortSpanBytes, cArgs.Stats.AbortSpanBytes = 0, 0 newStats.Add(*cArgs.Stats) newStats.AgeTo(0) // pin at LastUpdateNanos==0 if !newStats.Equal(enginepb.MVCCStats{}) { diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 4e0a55aac207..f4055d06dbc4 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -5576,14 +5576,20 @@ func TestElectionAfterRestart(t *testing.T) { require.NoError(t, err) require.NoError(t, tc.WaitForFullReplication()) - for _, row := range sqlutils.MakeSQLRunner(tc.Conns[0]).QueryStr( - t, `SELECT range_id FROM crdb_internal.ranges_no_leases WHERE table_name = 't';`, - ) { - n, err := strconv.Atoi(row[0]) - require.NoError(t, err) - rangeIDs[roachpb.RangeID(n)] = 0 - } - require.Len(t, rangeIDs, numRanges) + testutils.SucceedsSoon(t, func() error { + for _, row := range sqlutils.MakeSQLRunner(tc.Conns[0]).QueryStr( + t, `SELECT range_id FROM crdb_internal.ranges_no_leases WHERE table_name = 't';`, + ) { + n, err := strconv.Atoi(row[0]) + require.NoError(t, err) + rangeIDs[roachpb.RangeID(n)] = 0 + } + if len(rangeIDs) != numRanges { + return errors.Newf("expected %d ranges, found %d", numRanges, len(rangeIDs)) + } + return nil + }) + t.Logf("created %d ranges", numRanges) // Make sure that the ranges have all followers fully caught up. 
Otherwise, diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go new file mode 100644 index 000000000000..2bc3e648a871 --- /dev/null +++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go @@ -0,0 +1,588 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package kvserver_test + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/circuit" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" +) + +// In all scenarios below, we are starting out with our range on n1 and n2, +// and all other ranges (in particular the liveness range) on n1. +// +// TODO(tbg): via tracing, test that when the breaker is tripped, requests fail +// fast right upon entering the replica. + +const ( + n1 = 0 + n2 = 1 + + pauseHeartbeats = true + keepHeartbeats = true +) + +// This is a sanity check in which the breaker plays no role. +func TestReplicaCircuitBreaker_NotTripped(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Circuit breaker doesn't get in the way of anything unless + // something trips it. + require.NoError(t, tc.Write(n1)) + tc.RequireIsNotLeaseholderError(t, tc.Write(n2)) + require.NoError(t, tc.Read(n1)) + tc.RequireIsNotLeaseholderError(t, tc.Read(n2)) +} + +// In this test, n1 holds the lease and we disable the probe and trip the +// breaker. While the breaker is tripped, requests fail-fast with either a +// breaker or lease error. When the probe is re-enabled, everything heals. +func TestReplicaCircuitBreaker_LeaseholderTripped(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Get lease on n1. + require.NoError(t, tc.Write(n1)) + // Disable the probe so that when the breaker trips, it stays tripped. + tc.SetProbeEnabled(n1, false) + tc.Report(n1, errors.New("boom")) + + // n1 could theoretically still serve reads (there is a valid lease + // and none of the latches are taken), but since it is hard to determine + // that upfront we currently fail all reads as well. 
+ tc.RequireIsBreakerOpen(t, tc.Read(n1)) + tc.RequireIsBreakerOpen(t, tc.Write(n1)) + + // When we go through the KV client stack, we still get the breaker error + // back. + tc.RequireIsBreakerOpen(t, tc.WriteDS(n1)) + tc.RequireIsBreakerOpen(t, tc.WriteDS(n2)) + + // n2 does not have the lease so all it does is redirect to the leaseholder + // n1. + tc.RequireIsNotLeaseholderError(t, tc.Read(n2)) + tc.RequireIsNotLeaseholderError(t, tc.Write(n2)) + + // Enable the probe. Even a read should trigger the probe + // and within due time the breaker should heal. + tc.SetProbeEnabled(n1, true) + tc.UntripsSoon(t, tc.Read, n1) + // Same behavior on writes. + tc.Report(n1, errors.New("boom again")) + tc.UntripsSoon(t, tc.Write, n1) +} + +// In this scenario we have n1 holding the lease and we permanently trip the +// breaker on follower n2. Before the breaker is tripped, we see +// NotLeaseholderError. When it's tripped, those are supplanted by the breaker +// errors. Once we allow the breaker to probe, the breaker untrips. In +// particular, this tests that the probe can succeed even when run on a +// follower (which would not be true if it required the local Replica to +// execute an operation that requires the lease). +func TestReplicaCircuitBreaker_FollowerTripped(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Get lease on n1. + require.NoError(t, tc.Write(n1)) + // Disable the probe on n2 so that when the breaker trips, it stays tripped. + tc.SetProbeEnabled(n2, false) + tc.Report(n2, errors.New("boom")) + + // We didn't trip the leaseholder n1, so it is unaffected. + require.NoError(t, tc.Read(n1)) + require.NoError(t, tc.Write(n1)) + // Even if we go through DistSender, we reliably reach the leaseholder. + // TODO(tbg): I think this relies on the leaseholder being cached. If + // DistSender tried to contact the follower and got the breaker error, at + // time of writing it would propagate it. + require.NoError(t, tc.WriteDS(n1)) + + tc.RequireIsBreakerOpen(t, tc.Read(n2)) + tc.RequireIsBreakerOpen(t, tc.Write(n2)) + + // Enable the probe. Even a read should trigger the probe + // and within due time the breaker should heal, giving us + // NotLeaseholderErrors again. + // + // TODO(tbg): this test would be more meaningful with follower reads. They + // should succeed when the breaker is open and fail if the breaker is + // tripped. However knowing that the circuit breaker check sits at the top + // of Replica.sendWithRangeID, it's clear that it won't make a difference. + tc.SetProbeEnabled(n2, true) + testutils.SucceedsSoon(t, func() error { + if err := tc.Read(n2); !errors.HasType(err, (*roachpb.NotLeaseHolderError)(nil)) { + return err + } + return nil + }) + // Same behavior on writes. + tc.Report(n2, errors.New("boom again")) + testutils.SucceedsSoon(t, func() error { + if err := tc.Write(n2); !errors.HasType(err, (*roachpb.NotLeaseHolderError)(nil)) { + return err + } + return nil + }) +} + +// In this scenario, the breaker is tripped and the probe is disabled and +// additionally, the liveness records for both nodes have expired. Soon after +// the probe is re-enabled, the breaker heals. In particular, the probe isn't +// doing anything that requires the lease (or whatever it does that requires +// the lease is sufficiently special cased; at time of writing it's the former +// but as the probe learns deeper checks, the plan is ultimately the latter). 
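+// Concretely, the test trips the breaker on n1 with the probe disabled and expires all
+// leases; n1 then fails fast on the breaker while n2 redirects to the (non-live)
+// leaseholder, and once heartbeats resume and the probe is re-enabled, requests to n1
+// succeed again.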
+func TestReplicaCircuitBreaker_LeaselessTripped(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Put the lease on n1 but then trip the breaker with the probe + // disabled. + require.NoError(t, tc.Write(n1)) + tc.SetProbeEnabled(n1, false) + tc.Report(n1, errors.New("boom")) + resumeHeartbeats := tc.ExpireAllLeases(t, pauseHeartbeats) + + // n2 (not n1) will return a NotLeaseholderError. This may be surprising - + // why isn't it trying and succeeding to acquire a lease - but it does + // not do that because it sees that the new leaseholder (n2) is not live + // itself. We'll revisit this after re-enabling liveness later in the test. + { + err := tc.Read(n2) + // At time of writing: not incrementing epoch on n1 because next + // leaseholder (n2) not live. + t.Log(err) + tc.RequireIsNotLeaseholderError(t, err) + // Same behavior for write on n2. + tc.RequireIsNotLeaseholderError(t, tc.Write(n2)) + } + // On n1, run into the circuit breaker when requesting lease. + { + tc.RequireIsBreakerOpen(t, tc.Read(n1)) + tc.RequireIsBreakerOpen(t, tc.Write(n1)) + } + + // Let the breaker heal and things should go back to normal. This is not a + // trivial thing to hold, as the probe needs to go through for this, and if + // we're not careful, the probe itself is held up by the breaker as well, or + // the probe will try to acquire a lease (which we're currently careful to + // avoid). + resumeHeartbeats() + tc.SetProbeEnabled(n1, true) + tc.UntripsSoon(t, tc.Read, n1) + tc.UntripsSoon(t, tc.Write, n1) + tc.RequireIsNotLeaseholderError(t, tc.Read(n2)) + tc.RequireIsNotLeaseholderError(t, tc.Write(n2)) +} + +// In this test, the range is on n1 and n2 and we take down the follower n2, +// thus losing quorum (but not the lease or leaseholder). After the +// SlowReplicationThreshold (which is reduced suitably to keep the test +// snappy) has passed, the breaker on n1's Replica trips. When n2 comes back, +// the probe on n1 succeeds and requests to n1 can acquire a lease and +// succeed. +func TestReplicaCircuitBreaker_Leaseholder_QuorumLoss(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Get lease on n1. + require.NoError(t, tc.Write(n1)) + tc.StopServer(n2) // lose quorum + + // We didn't lose the liveness range (which is only on n1). + require.NoError(t, tc.Server(n1).HeartbeatNodeLiveness()) + tc.SetSlowThreshold(10 * time.Millisecond) + { + err := tc.Write(n1) + var ae *roachpb.AmbiguousResultError + require.True(t, errors.As(err, &ae), "%+v", err) + t.Log(err) + } + tc.RequireIsBreakerOpen(t, tc.Read(n1)) + + // Bring n2 back and service should be restored. + tc.SetSlowThreshold(0) // reset + require.NoError(t, tc.RestartServer(n2)) + tc.UntripsSoon(t, tc.Read, n1) + require.NoError(t, tc.Write(n1)) +} + +// In this test, the range is on n1 and n2 and we place the lease on n2 and +// shut down n2 and expire the lease. n1 will be a non-leaseholder without +// quorum, and requests to it should trip the circuit breaker. This is an +// interesting test case internally because here, the request that trips the +// breaker is the slow lease request, and not the test's actual write. Since +// leases have lots of special casing internally, this is easy to get wrong. 
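+// See TestReplicaCircuitBreaker_Leaseholder_QuorumLoss above for the variant in which
+// the lease stays with the surviving replica.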
+func TestReplicaCircuitBreaker_Follower_QuorumLoss(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Get lease to n2 so that we can lose it without taking down the system ranges. + desc := tc.LookupRangeOrFatal(t, tc.ScratchRange(t)) + tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(n2)) + resumeHeartbeats := tc.ExpireAllLeases(t, keepHeartbeats) + tc.StopServer(n2) // lose quorum and leaseholder + resumeHeartbeats() + + // We didn't lose the liveness range (which is only on n1). + require.NoError(t, tc.Server(n1).HeartbeatNodeLiveness()) + tc.SetSlowThreshold(10 * time.Millisecond) + tc.RequireIsBreakerOpen(t, tc.Write(n1)) + tc.RequireIsBreakerOpen(t, tc.Read(n1)) + + // Bring n2 back and service should be restored. + tc.SetSlowThreshold(0) // reset + require.NoError(t, tc.RestartServer(n2)) + tc.UntripsSoon(t, tc.Read, n1) + require.NoError(t, tc.Write(n1)) +} + +// This test is skipped but documents that the current circuit breakers cannot +// prevent hung requests when the *liveness* range is down. +// +// The liveness range is usually 5x-replicated and so is less likely to lose +// quorum, for resilience against asymmetric partitions it would be nice to +// also trip the local breaker if liveness updated cannot be performed. We +// can't rely on receiving an error from the liveness range back, as we may not +// be able to reach any of its Replicas (and in fact all of its Replicas may +// have been lost, in extreme cases), so we would need to guard all +// interactions with the liveness range in a timeout, which is unsatisfying. +// +// A somewhat related problem needs to be solved for general loss of all +// Replicas of a Range. In that case the request will never reach a +// per-Replica circuit breaker and it will thus fail slow. Instead, we would +// need DistSender to detect this scenario (for example, by cross-checking +// liveness against the available targets, but this gets complicated again +// due to our having bootstrapped liveness on top of the KV stack). +// +// Solving the general problem, however, wouldn't obviate the need for +// special-casing of lease-related liveness interactions, since we also want +// to protect against the case in which the liveness range is "there" but +// simply will not make progress for whatever reason. +// +// An argument can be made that in such a case it is likely that the cluster +// is unavailable in its entirety. +func TestReplicaCircuitBreaker_Liveness_QuorumLoss(t *testing.T) { + defer leaktest.AfterTest(t)() + + skip.IgnoreLint(t, "See: https://github.com/cockroachdb/cockroach/issues/74616") + + defer log.Scope(t).Close(t) + tc := setupCircuitBreakerTest(t) + defer tc.Stopper().Stop(context.Background()) + + // Up-replicate liveness range and move lease to n2. + tc.AddVotersOrFatal(t, keys.NodeLivenessPrefix, tc.Target(n2)) + tc.TransferRangeLeaseOrFatal(t, tc.LookupRangeOrFatal(t, keys.NodeLivenessPrefix), tc.Target(n2)) + // Sanity check that things still work. + require.NoError(t, tc.Write(n1)) + tc.RequireIsNotLeaseholderError(t, tc.Write(n2)) + // Remove the second replica for our main range. + tc.RemoveVotersOrFatal(t, tc.ScratchRange(t), tc.Target(n2)) + + // Now stop n2. This will lose the liveness range only since the other + // ranges are on n1 only. + tc.StopServer(n2) + + // Expire all leases. We also pause all heartbeats but that doesn't really + // matter since the liveness range is unavailable anyway. 
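+	// The closure returned below re-enables the heartbeats and is deferred so that they
+	// resume at the end of the test.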
+ resume := tc.ExpireAllLeases(t, pauseHeartbeats) + defer resume() + + // Since there isn't a lease, and the liveness range is down, the circuit + // breaker should kick into gear. + tc.SetSlowThreshold(10 * time.Millisecond) + + // This is what fails, as the lease acquisition hangs on the liveness range + // but nothing will ever report a problem to the breaker. + tc.RequireIsBreakerOpen(t, tc.Read(n1)) + tc.RequireIsBreakerOpen(t, tc.Write(n1)) + + tc.SetSlowThreshold(0) // reset + require.NoError(t, tc.RestartServer(n2)) + + tc.UntripsSoon(t, tc.Read, n1) + require.NoError(t, tc.Write(n1)) +} + +// Test infrastructure below. + +func makeBreakerToggleable(b *circuit.Breaker) (setProbeEnabled func(bool)) { + opts := b.Opts() + origProbe := opts.AsyncProbe + var disableProbe int32 + opts.AsyncProbe = func(report func(error), done func()) { + if atomic.LoadInt32(&disableProbe) == 1 { + done() + return + } + origProbe(report, done) + } + b.Reconfigure(opts) + return func(to bool) { + var n int32 + if !to { + n = 1 + } + atomic.StoreInt32(&disableProbe, n) + } +} + +type replWithKnob struct { + *kvserver.Replica + setProbeEnabled func(bool) +} + +type circuitBreakerTest struct { + *testcluster.TestCluster + slowThresh *atomic.Value // time.Duration + ManualClock *hlc.HybridManualClock + repls []replWithKnob // 0 -> repl on Servers[0], etc +} + +func setupCircuitBreakerTest(t *testing.T) *circuitBreakerTest { + manualClock := hlc.NewHybridManualClock() + var rangeID int64 // atomic + slowThresh := &atomic.Value{} // supports .SetSlowThreshold(x) + slowThresh.Store(time.Duration(0)) + storeKnobs := &kvserver.StoreTestingKnobs{ + SlowReplicationThresholdOverride: func(ba *roachpb.BatchRequest) time.Duration { + t.Helper() + if rid := roachpb.RangeID(atomic.LoadInt64(&rangeID)); rid == 0 || ba == nil || ba.RangeID != rid { + return 0 + } + dur := slowThresh.Load().(time.Duration) + if dur > 0 { + t.Logf("%s: using slow replication threshold %s", ba.Summary(), dur) + } + return dur // 0 = default + }, + // The test will often check individual replicas and the lease will always be on + // n1. However, we don't control raft leadership placement and without this knob, + // n1 may refuse to acquire the lease, which we don't want. + AllowLeaseRequestProposalsWhenNotLeader: true, + // The TestingApplyFilter prevents n2 from requesting a lease (or from the lease + // being transferred to n2). The test seems to pass pretty reliably without this + // but it can't hurt. + TestingApplyFilter: func(args kvserverbase.ApplyFilterArgs) (int, *roachpb.Error) { + if !args.IsLeaseRequest { + return 0, nil + } + lease := args.State.Lease + if lease == nil { + return 0, nil + } + if lease.Replica.NodeID != 2 { + return 0, nil + } + pErr := roachpb.NewErrorf("test prevents lease acquisition by n2") + return 0, pErr + }, + } + // In some tests we'll restart servers, which means that we will be waiting + // for raft elections. Speed this up by campaigning aggressively. This also + // speeds up the test by calling refreshProposalsLocked more frequently, which + // is where the logic to trip the breaker sits. Together, this cuts most tests + // involving a restart from >4s to ~300ms. 
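+	// The raft settings below are applied cluster-wide via the ServerArgs further down.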
+ var raftCfg base.RaftConfig + raftCfg.SetDefaults() + raftCfg.RaftHeartbeatIntervalTicks = 1 + raftCfg.RaftElectionTimeoutTicks = 2 + reg := server.NewStickyInMemEnginesRegistry() + args := base.TestClusterArgs{ + ReplicationMode: base.ReplicationManual, + ServerArgs: base.TestServerArgs{ + RaftConfig: raftCfg, + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + ClockSource: manualClock.UnixNano, + StickyEngineRegistry: reg, + }, + Store: storeKnobs, + }, + }, + } + tc := testcluster.StartTestCluster(t, 2, args) + tc.Stopper().AddCloser(stop.CloserFn(reg.CloseAllStickyInMemEngines)) + + _, err := tc.ServerConn(0).Exec(`SET CLUSTER SETTING kv.replica_circuit_breaker.slow_replication_threshold = '45s'`) + require.NoError(t, err) + + k := tc.ScratchRange(t) + atomic.StoreInt64(&rangeID, int64(tc.LookupRangeOrFatal(t, k).RangeID)) + + tc.AddVotersOrFatal(t, k, tc.Target(1)) + + var repls []replWithKnob + for i := range tc.Servers { + repl := tc.GetFirstStoreFromServer(t, i).LookupReplica(keys.MustAddr(k)) + enableProbe := makeBreakerToggleable(repl.Breaker()) + repls = append(repls, replWithKnob{repl, enableProbe}) + } + return &circuitBreakerTest{ + TestCluster: tc, + ManualClock: manualClock, + repls: repls, + slowThresh: slowThresh, + } +} + +func (cbt *circuitBreakerTest) SetProbeEnabled(idx int, to bool) { + cbt.repls[idx].setProbeEnabled(to) +} + +func (cbt *circuitBreakerTest) Report(idx int, err error) { + cbt.repls[idx].Replica.Breaker().Report(err) +} + +func (cbt *circuitBreakerTest) UntripsSoon(t *testing.T, method func(idx int) error, idx int) { + t.Helper() + testutils.SucceedsSoon(t, func() error { + t.Helper() + err := method(idx) + // All errors coming out should be annotated as coming from + // the circuit breaker. + if err != nil && !errors.Is(err, circuit.ErrBreakerOpen) { + t.Fatalf("saw unexpected error %+v", err) + } + return err + }) +} + +func (cbt *circuitBreakerTest) ExpireAllLeases(t *testing.T, pauseHeartbeats bool) (undo func()) { + t.Helper() + var maxWT int64 + var fs []func() + for _, srv := range cbt.Servers { + lv := srv.NodeLiveness().(*liveness.NodeLiveness) + if pauseHeartbeats { + undo := lv.PauseAllHeartbeatsForTest() + fs = append(fs, undo) + } + self, ok := lv.Self() + require.True(t, ok) + if maxWT < self.Expiration.WallTime { + maxWT = self.Expiration.WallTime + } + } + cbt.ManualClock.Forward(maxWT + 1) + return func() { + for _, f := range fs { + f() + } + } +} + +func (*circuitBreakerTest) sendViaRepl(repl *kvserver.Replica, req roachpb.Request) error { + var ba roachpb.BatchRequest + ba.RangeID = repl.Desc().RangeID + ba.Timestamp = repl.Clock().Now() + ba.Add(req) + ctx, cancel := context.WithTimeout(context.Background(), testutils.DefaultSucceedsSoonDuration) + defer cancel() + _, pErr := repl.Send(ctx, ba) + // If our context got canceled, return an opaque error regardless of presence or + // absence of actual error. This makes sure we don't accidentally pass tests as + // a result of our context cancellation. 
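+	// Note that pErr may be nil here; the timeout error below overwrites it either way.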
+ if err := ctx.Err(); err != nil { + pErr = roachpb.NewErrorf("timed out waiting for batch response: %v", pErr) + } + return pErr.GoError() +} + +func (*circuitBreakerTest) sendViaDistSender(ds *kvcoord.DistSender, req roachpb.Request) error { + var ba roachpb.BatchRequest + ba.Add(req) + ctx, cancel := context.WithTimeout(context.Background(), testutils.DefaultSucceedsSoonDuration) + defer cancel() + _, pErr := ds.Send(ctx, ba) + // If our context got canceled, return an opaque error regardless of presence or + // absence of actual error. This makes sure we don't accidentally pass tests as + // a result of our context cancellation. + if err := ctx.Err(); err != nil { + pErr = roachpb.NewErrorf("timed out waiting for batch response: %v", pErr) + } + return pErr.GoError() +} + +func (*circuitBreakerTest) RequireIsBreakerOpen(t *testing.T, err error) { + t.Helper() + require.True(t, errors.Is(err, circuit.ErrBreakerOpen), "%+v", err) +} + +func (*circuitBreakerTest) RequireIsNotLeaseholderError(t *testing.T, err error) { + t.Helper() + ok := errors.HasType(err, (*roachpb.NotLeaseHolderError)(nil)) + require.True(t, ok, "%+v", err) +} + +func (cbt *circuitBreakerTest) Write(idx int) error { + return cbt.writeViaRepl(cbt.repls[idx].Replica) +} + +func (cbt *circuitBreakerTest) WriteDS(idx int) error { + put := roachpb.NewPut(cbt.repls[idx].Desc().StartKey.AsRawKey(), roachpb.MakeValueFromString("hello")) + return cbt.sendViaDistSender(cbt.Servers[idx].DistSender(), put) +} + +// SetSlowThreshold sets the SlowReplicationThreshold for requests sent through the +// test harness (i.e. via Write) to the provided duration. The zero value restores +// the default. +func (cbt *circuitBreakerTest) SetSlowThreshold(dur time.Duration) { + cbt.slowThresh.Store(dur) +} + +func (cbt *circuitBreakerTest) Read(idx int) error { + return cbt.readViaRepl(cbt.repls[idx].Replica) +} + +func (cbt *circuitBreakerTest) writeViaRepl(repl *kvserver.Replica) error { + put := roachpb.NewPut(repl.Desc().StartKey.AsRawKey(), roachpb.MakeValueFromString("hello")) + return cbt.sendViaRepl(repl, put) +} + +func (cbt *circuitBreakerTest) readViaRepl(repl *kvserver.Replica) error { + get := roachpb.NewGet(repl.Desc().StartKey.AsRawKey(), false /* forUpdate */) + return cbt.sendViaRepl(repl, get) +} diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index c76b2a82657d..f83d0201d0da 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -1403,7 +1403,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { } lease, _ := replica2.GetLease() if lease.Replica.NodeID != replica2.NodeID() { - return errors.Errorf("expected lease transfer to node2: %s", &lease) + return errors.Errorf("expected lease transfer to node2: %s", lease) } return nil }) @@ -1479,7 +1479,7 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { for i := 0; i < 2; i++ { r := tc.GetFirstStoreFromServer(t, i).LookupReplica(roachpb.RKey(expirationKey)) if l, _ := r.GetLease(); l.Replica.StoreID != tc.Target(1).StoreID { - return errors.Errorf("expected lease to transfer to replica 2: got %s", &l) + return errors.Errorf("expected lease to transfer to replica 2: got %s", l) } } return nil diff --git a/pkg/kv/kvserver/client_spanconfigs_test.go b/pkg/kv/kvserver/client_spanconfigs_test.go index d845451636ff..ce06c66cb436 100644 --- a/pkg/kv/kvserver/client_spanconfigs_test.go +++ b/pkg/kv/kvserver/client_spanconfigs_test.go @@ -59,7 +59,7 @@ func 
TestSpanConfigUpdateAppliedToReplica(t *testing.T) { _, err := s.InternalExecutor().(sqlutil.InternalExecutor).ExecEx(ctx, "inline-exec", nil, sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - `SET CLUSTER SETTING spanconfig.experimental_store.enabled = true`) + `SET CLUSTER SETTING spanconfig.store.enabled = true`) require.NoError(t, err) key, err := s.ScratchRange() diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index ea52bcf99fc1..bd81cdb34f55 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -1697,13 +1697,13 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { return nil } log.Infof(ctx, "received lease request (%s, %s)", - leaseReq.Span(), &leaseReq.Lease) + leaseReq.Span(), leaseReq.Lease) if !reflect.DeepEqual(*forbiddenDesc, leaseReq.Lease.Replica) { return nil } log.Infof(ctx, "refusing lease request (%s, %s) because %+v held lease for LHS of split", - leaseReq.Span(), &leaseReq.Lease, forbiddenDesc) + leaseReq.Span(), leaseReq.Lease, forbiddenDesc) return roachpb.NewError(&roachpb.NotLeaseHolderError{RangeID: args.Hdr.RangeID}) } diff --git a/pkg/kv/kvserver/closed_timestamp_test.go b/pkg/kv/kvserver/closed_timestamp_test.go index d27de3a8c9e1..42f760dfb975 100644 --- a/pkg/kv/kvserver/closed_timestamp_test.go +++ b/pkg/kv/kvserver/closed_timestamp_test.go @@ -30,7 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -393,7 +393,7 @@ func TestClosedTimestampCanServeAfterSplitAndMerges(t *testing.T) { } // Split the table at key 2. 
idxPrefix := keys.SystemSQLCodec.IndexPrefix(uint32(tableID), 1) - k, err := rowenc.EncodeTableKey(idxPrefix, tree.NewDInt(2), encoding.Ascending) + k, err := keyside.Encode(idxPrefix, tree.NewDInt(2), encoding.Ascending) if err != nil { t.Fatalf("failed to encode key: %+v", err) } @@ -974,7 +974,7 @@ func getEncodedKeyForTable( t.Fatalf("failed to lookup ids: %+v", err) } idxPrefix := keys.SystemSQLCodec.IndexPrefix(uint32(tableID), 1) - k, err := rowenc.EncodeTableKey(idxPrefix, val, encoding.Ascending) + k, err := keyside.Encode(idxPrefix, val, encoding.Ascending) if err != nil { t.Fatalf("failed to encode split key: %+v", err) } diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go index 057e6d2abb43..42b4ec4e6f78 100644 --- a/pkg/kv/kvserver/helpers_test.go +++ b/pkg/kv/kvserver/helpers_test.go @@ -35,6 +35,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util" + circuit2 "github.com/cockroachdb/cockroach/pkg/util/circuit" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/quotapool" @@ -221,6 +222,10 @@ func NewTestStorePool(cfg StoreConfig) *StorePool { ) } +func (r *Replica) Breaker() *circuit2.Breaker { + return r.breaker.wrapped +} + func (r *Replica) AssertState(ctx context.Context, reader storage.Reader) { r.raftMu.Lock() defer r.raftMu.Unlock() diff --git a/pkg/kv/kvserver/kvserverbase/bulk_adder.go b/pkg/kv/kvserver/kvserverbase/bulk_adder.go index cbc4d43843e3..71c55e47e83b 100644 --- a/pkg/kv/kvserver/kvserverbase/bulk_adder.go +++ b/pkg/kv/kvserver/kvserverbase/bulk_adder.go @@ -67,6 +67,10 @@ type BulkAdderOptions struct { // different from the timestamp used to construct the adder which is what is // actually applied to each key). BatchTimestamp hlc.Timestamp + + // WriteAtRequestTime is used to set the corresponding field when sending + // constructed SSTables to AddSSTable. See roachpb.AddSSTableRequest. + WriteAtRequestTime bool } // DisableExplicitSplits can be returned by a SplitAndScatterAfter function to diff --git a/pkg/kv/kvserver/kvserverpb/state.proto b/pkg/kv/kvserver/kvserverpb/state.proto index 8f484529721f..08034e04dfb3 100644 --- a/pkg/kv/kvserver/kvserverpb/state.proto +++ b/pkg/kv/kvserver/kvserverpb/state.proto @@ -177,6 +177,9 @@ message RangeInfo { // Closed timestamp info communicated through the side-transport. See also // state.raft_closed_timestamp. RangeSideTransportInfo closed_timestamp_sidetransport_info = 19 [(gogoproto.customname) = "ClosedTimestampSideTransportInfo", (gogoproto.nullable) = false ]; + // The circuit breaker error, if any. This is nonzero if and only if the + // circuit breaker on the source Replica is tripped. 
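+  // (For this string field, "nonzero" means non-empty.)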
+ string circuit_breaker_error = 20; } // RangeSideTransportInfo describes a range's closed timestamp info communicated diff --git a/pkg/kv/kvserver/liveness/BUILD.bazel b/pkg/kv/kvserver/liveness/BUILD.bazel index 1fa6121d1d69..962629ea783d 100644 --- a/pkg/kv/kvserver/liveness/BUILD.bazel +++ b/pkg/kv/kvserver/liveness/BUILD.bazel @@ -41,9 +41,6 @@ go_test( embed = [":liveness"], deps = [ "//pkg/base", - "//pkg/config", - "//pkg/config/zonepb", - "//pkg/keys", "//pkg/kv/kvserver", "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/roachpb:with-mocks", @@ -63,7 +60,6 @@ go_test( "//pkg/util/syncutil", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", - "@com_github_gogo_protobuf//proto", "@com_github_kr_pretty//:pretty", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/kv/kvserver/liveness/client_test.go b/pkg/kv/kvserver/liveness/client_test.go index 80019481dc8d..178369def9f0 100644 --- a/pkg/kv/kvserver/liveness/client_test.go +++ b/pkg/kv/kvserver/liveness/client_test.go @@ -19,9 +19,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" @@ -36,7 +33,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" - "github.com/gogo/protobuf/proto" "github.com/kr/pretty" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -174,14 +170,9 @@ func TestNodeLivenessStatusMap(t *testing.T) { ctx = logtags.AddTag(ctx, "in test", nil) log.Infof(ctx, "setting zone config to disable replication") - // Allow for inserting zone configs without having to go through (or - // duplicate the logic from) the CLI. - config.TestingSetupZoneConfigHook(tc.Stopper()) - zoneConfig := zonepb.DefaultZoneConfig() - // Force just one replica per range to ensure that we can shut down - // nodes without endangering the liveness range. 
- zoneConfig.NumReplicas = proto.Int32(1) - config.TestingSetZoneConfig(keys.MetaRangesID, zoneConfig) + if _, err := tc.Conns[0].Exec(`ALTER RANGE meta CONFIGURE ZONE using num_replicas = 1`); err != nil { + t.Fatal(err) + } log.Infof(ctx, "starting 3 more nodes") tc.AddAndStartServer(t, serverArgs) diff --git a/pkg/kv/kvserver/loqrecovery/BUILD.bazel b/pkg/kv/kvserver/loqrecovery/BUILD.bazel index 532bcc76e5ff..108e4b717deb 100644 --- a/pkg/kv/kvserver/loqrecovery/BUILD.bazel +++ b/pkg/kv/kvserver/loqrecovery/BUILD.bazel @@ -46,6 +46,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/uuid", "@com_github_cockroachdb_datadriven//:datadriven", + "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", "@in_gopkg_yaml_v2//:yaml_v2", "@io_etcd_go_etcd_raft_v3//raftpb", diff --git a/pkg/kv/kvserver/loqrecovery/apply.go b/pkg/kv/kvserver/loqrecovery/apply.go index c1e69ee7e562..29d0cb9a7061 100644 --- a/pkg/kv/kvserver/loqrecovery/apply.go +++ b/pkg/kv/kvserver/loqrecovery/apply.go @@ -102,7 +102,7 @@ func PrepareUpdateReplicas( } if len(missing) > 0 { - report.MissingStores = storeListFromSet(missing) + report.MissingStores = storeSliceFromSet(missing) } return report, nil } @@ -113,7 +113,7 @@ func applyReplicaUpdate( clock := hlc.NewClock(hlc.UnixNano, 0) report := PrepareReplicaReport{ RangeID: update.RangeID, - Replica: *update.NewReplica, + Replica: update.NewReplica, StartKey: update.StartKey.AsRKey(), } diff --git a/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.go b/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.go index e1e12cc1072a..872f81b412a7 100644 --- a/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.go +++ b/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.go @@ -60,3 +60,15 @@ func (m ReplicaUpdate) NodeID() roachpb.NodeID { func (m ReplicaUpdate) StoreID() roachpb.StoreID { return m.NewReplica.StoreID } + +// Replica gets replica for the store where this info and range +// descriptor were collected. Returns err if it can't find replica +// descriptor for the store it originated from. 
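+// The lookup is keyed on m.StoreID within m.Desc.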
+func (m *ReplicaInfo) Replica() (roachpb.ReplicaDescriptor, error) { + if d, ok := m.Desc.GetReplicaDescriptor(m.StoreID); ok { + return d, nil + } + return roachpb.ReplicaDescriptor{}, errors.Errorf( + "invalid replica info: its own store s%d is not present in descriptor replicas %s", + m.StoreID, m.Desc) +} diff --git a/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto b/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto index 4b9484d2467c..3640410692eb 100644 --- a/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto +++ b/pkg/kv/kvserver/loqrecovery/loqrecoverypb/recovery.proto @@ -50,7 +50,8 @@ message ReplicaUpdate { int32 old_replica_id = 3 [(gogoproto.customname) = "OldReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID", (gogoproto.moretags) = "yaml:\"OldReplicaID\""]; - roachpb.ReplicaDescriptor new_replica = 4 [(gogoproto.moretags) = "yaml:\"NewReplica\""]; + roachpb.ReplicaDescriptor new_replica = 4 [(gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"NewReplica\""]; int32 next_replica_id = 5 [(gogoproto.customname) = "NextReplicaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/roachpb.ReplicaID", (gogoproto.moretags) = "yaml:\"NextReplicaID\""]; diff --git a/pkg/kv/kvserver/loqrecovery/plan.go b/pkg/kv/kvserver/loqrecovery/plan.go index 61090a840057..45f8c86e6828 100644 --- a/pkg/kv/kvserver/loqrecovery/plan.go +++ b/pkg/kv/kvserver/loqrecovery/plan.go @@ -12,6 +12,7 @@ package loqrecovery import ( "context" + "sort" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -20,8 +21,8 @@ import ( ) // nextReplicaIDIncrement defines how much forward we want to advance an ID of -// the designated winning replica to avoid any potential replicaID conflicts in -// case we've picked not the most up-to-date replica. +// the designated surviving replica to avoid any potential replicaID conflicts +// in case we've picked not the most up-to-date replica. const nextReplicaIDIncrement = 10 // updatedLocationsMap is tracking which stores we plan to update with a plan. @@ -43,58 +44,62 @@ func (m updatedLocationsMap) add(node roachpb.NodeID, store roachpb.StoreID) { func (m updatedLocationsMap) asMapOfSlices() map[roachpb.NodeID][]roachpb.StoreID { newMap := make(map[roachpb.NodeID][]roachpb.StoreID) for k, v := range m { - newMap[k] = storeListFromSet(v) + newMap[k] = storeSliceFromSet(v) } return newMap } -// PlanningReport provides aggregate stats and details of replica updates that is used -// for user confirmation. +// PlanningReport provides aggregate stats and details of replica updates that +// is used for user confirmation. type PlanningReport struct { - // TotalReplicas is the number of replicas that were found on nodes present in the - // cluster + // TotalReplicas is the number of replicas that were found on nodes present + // in the cluster TotalReplicas int - // DiscardedNonSurvivors is the number of replicas from ranges that lost quorum that + // DiscardedNonSurvivors is the number of replicas from ranges that lost + // quorum that // we decided not to use according to selection criteria used by planner. DiscardedNonSurvivors int // TODO(oleg): track total analyzed range count in subsequent version - // PresentStores is deduced list of stores that we collected replica info from. This - // set is filled from analyzed descriptors and may not strictly match stores on which - // collection was run if some stores are empty. 
+ // PresentStores is deduced list of stores that we collected replica info + // from. This set is filled from analyzed descriptors and may not strictly + // match stores on which collection was run if some stores are empty. PresentStores []roachpb.StoreID - // MissingStores is a deduced list of stores that were found in replica descriptors - // but were not found in range descriptors e.g. collection was not run on those stores - // because they are dead or because of human error. + // MissingStores is a deduced list of stores that were found in replica + // descriptors but were not found in range descriptors e.g. collection was not + // run on those stores because they are dead or because of human error. MissingStores []roachpb.StoreID - // PlannedUpdates contains detailed update info about each planned update. This - // information is presented to user for action confirmation and can contain details - // that are not needed for actual plan application. + // PlannedUpdates contains detailed update info about each planned update. + // This information is presented to user for action confirmation and can + // contain details that are not needed for actual plan application. PlannedUpdates []ReplicaUpdateReport // UpdatedNodes contains information about nodes with their stores where plan // needs to be applied. Stores are sorted in ascending order. UpdatedNodes map[roachpb.NodeID][]roachpb.StoreID } -// ReplicaUpdateReport contains detailed info about changes planned for particular replica -// that was chosen as a designated survivor for the range. -// This information is more detailed than update plan and collected for reporting purposes. -// While information in update plan is meant for loqrecovery components, Report is meant for -// cli interaction to keep user informed of changes. +// ReplicaUpdateReport contains detailed info about changes planned for +// particular replica that was chosen as a designated survivor for the range. +// This information is more detailed than update plan and collected for +// reporting purposes. +// While information in update plan is meant for loqrecovery components, Report +// is meant for cli interaction to keep user informed of changes. type ReplicaUpdateReport struct { - RangeID roachpb.RangeID - StartKey roachpb.RKey - Replica roachpb.ReplicaDescriptor - OldReplica roachpb.ReplicaDescriptor - StoreID roachpb.StoreID - DiscardedReplicas roachpb.ReplicaSet + RangeID roachpb.RangeID + StartKey roachpb.RKey + Replica roachpb.ReplicaDescriptor + OldReplica roachpb.ReplicaDescriptor + StoreID roachpb.StoreID + DiscardedAvailableReplicas roachpb.ReplicaSet + DiscardedDeadReplicas roachpb.ReplicaSet } -// PlanReplicas analyzes captured replica information to determine which replicas could serve -// as dedicated survivors in ranges where quorum was lost. -// Devised plan doesn't guarantee data consistency after the recovery, only the fact that ranges -// could progress and subsequently perform up-replication. +// PlanReplicas analyzes captured replica information to determine which +// replicas could serve as dedicated survivors in ranges where quorum was +// lost. +// Devised plan doesn't guarantee data consistency after the recovery, only +// the fact that ranges could progress and subsequently perform up-replication. 
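+// The accompanying PlanningReport is returned so the operator can review the planned
+// updates before applying them.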
func PlanReplicas( ctx context.Context, nodes []loqrecoverypb.NodeReplicaInfo, deadStores []roachpb.StoreID, ) (loqrecoverypb.ReplicaUpdatePlan, PlanningReport, error) { @@ -108,130 +113,53 @@ func PlanReplicas( if err != nil { return loqrecoverypb.ReplicaUpdatePlan{}, PlanningReport{}, err } - report.PresentStores = storeListFromSet(availableStoreIDs) - report.MissingStores = storeListFromSet(missingStores) + report.PresentStores = storeSliceFromSet(availableStoreIDs) + report.MissingStores = storeSliceFromSet(missingStores) - var plan []loqrecoverypb.ReplicaUpdate - // Find ranges that lost quorum and create updates for them. - // This approach is only using local info stored in each replica independently. - // It is inherited in unchanged form from existing recovery operation - // `debug unsafe-remove-dead-replicas`. - // TODO(oleg): #73662 Use additional field and information about all replicas of - // range to determine winner. - for _, rangeDesc := range replicas { - report.TotalReplicas++ - numDeadPeers := 0 - desc := rangeDesc.Desc - allReplicas := desc.Replicas().Descriptors() - maxLiveVoter := roachpb.StoreID(-1) - var winningReplica roachpb.ReplicaDescriptor - for _, rep := range allReplicas { - if _, ok := availableStoreIDs[rep.StoreID]; !ok { - numDeadPeers++ - continue - } - // The designated survivor will be the voter with the highest storeID. - // Note that an outgoing voter cannot be designated, as the only - // replication change it could make is to turn itself into a learner, at - // which point the range is completely messed up. - // - // Note: a better heuristic might be to choose the leaseholder store, not - // the largest store, as this avoids the problem of requests still hanging - // after running the tool in a rolling-restart fashion (when the lease- - // holder is under a valid epoch and was ont chosen as designated - // survivor). However, this choice is less deterministic, as leaseholders - // are more likely to change than replication configs. The hanging would - // independently be fixed by the below issue, so staying with largest store - // is likely the right choice. See: - // - // https://github.com/cockroachdb/cockroach/issues/33007 - if rep.IsVoterNewConfig() && rep.StoreID > maxLiveVoter { - maxLiveVoter = rep.StoreID - winningReplica = rep - } - } - - // If there's no dead peer in this group (so can't hope to fix - // anything by rewriting the descriptor) or the current store is not the - // one we want to turn into the sole voter, don't do anything. - if numDeadPeers == 0 { - continue - } - - // The replica thinks it can make progress anyway, so we leave it alone. - if desc.Replicas().CanMakeProgress(func(rep roachpb.ReplicaDescriptor) bool { - _, ok := availableStoreIDs[rep.StoreID] - return ok - }) { - log.Infof(ctx, "Replica has not lost quorum, skipping: %s", desc) - continue - } - - if rangeDesc.StoreID != maxLiveVoter { - log.Infof(ctx, "Not designated survivor, skipping: %s", desc) - report.DiscardedNonSurvivors++ - continue - } + replicasByRangeID := groupReplicasByRangeID(replicas) + // proposedSurvivors contain decisions for all ranges in keyspace. it + // contains ranges that lost quorum as well as the ones that didn't. 
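+	// Keeping the healthy ranges around lets checkKeyspaceCovering below verify that the
+	// chosen survivors tile the full keyspace.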
+ var proposedSurvivors []rankedReplicas + for _, rangeReplicas := range replicasByRangeID { + proposedSurvivors = append(proposedSurvivors, rankReplicasBySurvivability(rangeReplicas)) + } + if err = checkKeyspaceCovering(proposedSurvivors); err != nil { + return loqrecoverypb.ReplicaUpdatePlan{}, PlanningReport{}, err + } - // We're the designated survivor and the range needs to be recovered. - // - // Rewrite the range as having a single replica. The winning replica is - // picked arbitrarily: the one with the highest store ID. This is not always - // the best option: it may lose writes that were committed on another - // surviving replica that had applied more of the raft log. However, in - // practice when we have multiple surviving replicas but still need this - // tool (because the replication factor was 4 or higher), we see that the - // logs are nearly always in sync and the choice doesn't matter. Correctly - // picking the replica with the longer log would complicate the use of this - // tool. - // Rewrite the replicas list. Bump the replica ID so that in case there are - // other surviving nodes that were members of the old incarnation of the - // range, they no longer recognize this revived replica (because they are - // not in sync with it). - update := loqrecoverypb.ReplicaUpdate{ - RangeID: rangeDesc.Desc.RangeID, - StartKey: loqrecoverypb.RecoveryKey(desc.StartKey), - OldReplicaID: winningReplica.ReplicaID, - NewReplica: &roachpb.ReplicaDescriptor{ - NodeID: rangeDesc.NodeID, - StoreID: rangeDesc.StoreID, - ReplicaID: desc.NextReplicaID + nextReplicaIDIncrement, - }, - NextReplicaID: desc.NextReplicaID + nextReplicaIDIncrement + 1, + var plan []loqrecoverypb.ReplicaUpdate + for _, p := range proposedSurvivors { + report.TotalReplicas += len(p) + u, ok := makeReplicaUpdateIfNeeded(ctx, p, availableStoreIDs) + if ok { + plan = append(plan, u) + report.DiscardedNonSurvivors += len(p) - 1 + report.PlannedUpdates = append(report.PlannedUpdates, makeReplicaUpdateReport(ctx, p, u)) + updatedLocations.add(u.NodeID(), u.StoreID()) + log.Infof(ctx, "replica has lost quorum, recovering: %s -> %s", p.survivor().Desc, u) + } else { + log.Infof(ctx, "range r%d didn't lose quorum", p.rangeID()) } - log.Infof(ctx, "Replica has lost quorum, recovering: %s -> %s", desc, update) - plan = append(plan, update) - - discarded := desc.Replicas().DeepCopy() - discarded.RemoveReplica(rangeDesc.NodeID, rangeDesc.StoreID) - report.PlannedUpdates = append(report.PlannedUpdates, ReplicaUpdateReport{ - RangeID: desc.RangeID, - StartKey: desc.StartKey, - Replica: *update.NewReplica, - OldReplica: winningReplica, - StoreID: rangeDesc.StoreID, - DiscardedReplicas: discarded, - }) - updatedLocations.add(rangeDesc.NodeID, rangeDesc.StoreID) } report.UpdatedNodes = updatedLocations.asMapOfSlices() return loqrecoverypb.ReplicaUpdatePlan{Updates: plan}, report, nil } -// validateReplicaSets evaluates provided set of replicas and an optional deadStoreIDs -// request and produces consistency info containing: -// availableStores - all storeIDs for which info was collected, i.e. (barring operator -// error) the conclusive list of all remaining stores in the cluster. -// missingStores - all dead stores (stores that are referenced by replicas, but not -// present in any of descriptors) -// If inconsistency is found e.g. no info was provided for a store but it is not present -// in explicit deadStoreIDs list, error is returned. 
+// validateReplicaSets evaluates provided set of replicas and an optional +// deadStoreIDs request and produces consistency info containing: +// availableStores - all storeIDs for which info was collected, i.e. +// (barring operator error) the conclusive list of all +// remaining stores in the cluster. +// missingStores - all dead stores (stores that are referenced by replicas, +// but not present in any of descriptors) +// If inconsistency is found e.g. no info was provided for a store but it is +// not present in explicit deadStoreIDs list, error is returned. func validateReplicaSets( replicas []loqrecoverypb.ReplicaInfo, deadStores []roachpb.StoreID, ) (availableStoreIDs, missingStoreIDs storeIDSet, _ error) { - // Populate availableStoreIDs with all StoreIDs from which we collected info and, - // populate missingStoreIDs with all StoreIDs referenced in replica descriptors for - // which no information was collected. + // Populate availableStoreIDs with all StoreIDs from which we collected info + // and, populate missingStoreIDs with all StoreIDs referenced in replica + // descriptors for which no information was collected. availableStoreIDs = make(storeIDSet) missingStoreIDs = make(storeIDSet) for _, replicaDescriptor := range replicas { @@ -240,11 +168,11 @@ func validateReplicaSets( missingStoreIDs[replicaDesc.StoreID] = struct{}{} } } - // The difference between all referenced StoreIDs (missingStoreIDs) and the present - // StoreIDs (presentStoreIDs) should exactly equal the user-provided list of dead - // stores (deadStores), and the former must be a superset of the latter (since each - // descriptor found on a store references that store). Verify all of these conditions - // and error out if one of them does not hold. + // The difference between all referenced StoreIDs (missingStoreIDs) and the + // present StoreIDs (presentStoreIDs) should exactly equal the user-provided + // list of dead stores (deadStores), and the former must be a superset of the + // latter (since each descriptor found on a store references that store). + // Verify all of these conditions and error out if one of them does not hold. for id := range availableStoreIDs { delete(missingStoreIDs, id) } @@ -274,3 +202,289 @@ func validateReplicaSets( } return availableStoreIDs, missingStoreIDs, nil } + +func groupReplicasByRangeID( + descriptors []loqrecoverypb.ReplicaInfo, +) map[roachpb.RangeID][]loqrecoverypb.ReplicaInfo { + groupedRanges := make(map[roachpb.RangeID][]loqrecoverypb.ReplicaInfo) + for _, descriptor := range descriptors { + groupedRanges[descriptor.Desc.RangeID] = append( + groupedRanges[descriptor.Desc.RangeID], descriptor) + } + return groupedRanges +} + +// rankedReplicas contains replica resolution details e.g. preferred replica as +// well as extra info to produce a report of the planned action. 
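+// Once rankReplicasBySurvivability has sorted the slice, element 0 is the preferred
+// (surviving) replica.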
+type rankedReplicas []loqrecoverypb.ReplicaInfo + +func (p rankedReplicas) startKey() roachpb.RKey { + return p[0].Desc.StartKey +} + +func (p rankedReplicas) endKey() roachpb.RKey { + return p[0].Desc.EndKey +} + +func (p rankedReplicas) span() roachpb.Span { + return roachpb.Span{Key: roachpb.Key(p[0].Desc.StartKey), EndKey: roachpb.Key(p[0].Desc.EndKey)} +} + +func (p rankedReplicas) rangeID() roachpb.RangeID { + return p[0].Desc.RangeID +} + +func (p rankedReplicas) nodeID() roachpb.NodeID { + return p[0].NodeID +} + +func (p rankedReplicas) storeID() roachpb.StoreID { + return p[0].StoreID +} + +func (p rankedReplicas) survivor() *loqrecoverypb.ReplicaInfo { + return &p[0] +} + +// rankReplicasBySurvivability given a slice of replicas for the range from +// all live stores, pick one to survive recovery. if progress can be made, +// still pick one replica so that it could be used to do key covering +// validation. +// Note that replicas argument would be sorted in process of picking a +// survivor +func rankReplicasBySurvivability(replicas []loqrecoverypb.ReplicaInfo) rankedReplicas { + isVoter := func(desc loqrecoverypb.ReplicaInfo) int { + for _, replica := range desc.Desc.InternalReplicas { + if replica.StoreID == desc.StoreID { + if replica.IsVoterNewConfig() { + return 1 + } + return 0 + } + } + // This is suspicious, our descriptor is not in replicas. Panic maybe? + return 0 + } + sort.Slice(replicas, func(i, j int) bool { + // When finding the best suitable replica evaluate 3 conditions in order: + // - replica is a voter + // - replica has the higher range committed index + // - replica has the higher store id + // + // Note: that an outgoing voter cannot be designated, as the only + // replication change it could make is to turn itself into a learner, at + // which point the range is completely messed up. + // + // Note: a better heuristic might be to choose the leaseholder store, not + // the largest store, as this avoids the problem of requests still hanging + // after running the tool in a rolling-restart fashion (when the lease- + // holder is under a valid epoch and was ont chosen as designated + // survivor). However, this choice is less deterministic, as leaseholders + // are more likely to change than replication configs. The hanging would + // independently be fixed by the below issue, so staying with largest store + // is likely the right choice. See: + // + // https://github.com/cockroachdb/cockroach/issues/33007 + voterI := isVoter(replicas[i]) + voterJ := isVoter(replicas[j]) + if voterI > voterJ { + return true + } + if voterI < voterJ { + return false + } + if replicas[i].RaftAppliedIndex > replicas[j].RaftAppliedIndex { + return true + } + if replicas[i].RaftAppliedIndex < replicas[j].RaftAppliedIndex { + return false + } + return replicas[i].StoreID > replicas[j].StoreID + }) + return replicas +} + +// checkKeyspaceCovering given slice of all survivor ranges, checks that full +// keyspace is covered. +// Note that slice would be sorted in process of the check. +func checkKeyspaceCovering(replicas []rankedReplicas) error { + sort.Slice(replicas, func(i, j int) bool { + // We only need to sort replicas in key order to detect + // key collisions or gaps, but if we have matching keys + // sort becomes unstable which makes it produce different + // errors on different runs on the same data. To address + // that, we also add RangeID as a sorting criteria as a + // second level key to add stability. 
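+		// Primary key: start key; secondary key: range ID.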
+ if replicas[i].startKey().Less(replicas[j].startKey()) { + return true + } + if replicas[i].startKey().Equal(replicas[j].startKey()) { + return replicas[i].rangeID() < replicas[j].rangeID() + } + return false + }) + var anomalies []keyspaceCoverageAnomaly + prevDesc := rankedReplicas{{Desc: roachpb.RangeDescriptor{}}} + // We validate that first range starts at min key, last range ends at max key + // and that for every range start key is equal to end key of previous range. + // If any of those conditions fail, we record this as anomaly to indicate + // there's a gap between ranges or an overlap between two or more ranges. + for _, rankedDescriptors := range replicas { + // We need to take special care of the case where the survivor replica is + // outgoing voter. It cannot be designated, as the only replication change + // it could make is to turn itself into a learner, at which point the range + // is completely messed up. If it is not a stale replica of some sorts, + // then that would be a gap in keyspace coverage. + r, err := rankedDescriptors.survivor().Replica() + if err != nil { + return err + } + if !r.IsVoterNewConfig() { + continue + } + switch { + case rankedDescriptors.startKey().Less(prevDesc.endKey()): + start := keyMax(rankedDescriptors.startKey(), prevDesc.startKey()) + end := keyMin(rankedDescriptors.endKey(), prevDesc.endKey()) + anomalies = append(anomalies, keyspaceCoverageAnomaly{ + span: roachpb.Span{Key: roachpb.Key(start), EndKey: roachpb.Key(end)}, + overlap: true, + range1: prevDesc.rangeID(), + range1Span: prevDesc.span(), + range2: rankedDescriptors.rangeID(), + range2Span: rankedDescriptors.span(), + }) + case prevDesc.endKey().Less(rankedDescriptors.startKey()): + anomalies = append(anomalies, keyspaceCoverageAnomaly{ + span: roachpb.Span{ + Key: roachpb.Key(prevDesc.endKey()), + EndKey: roachpb.Key(rankedDescriptors.startKey()), + }, + overlap: false, + range1: prevDesc.rangeID(), + range1Span: prevDesc.span(), + range2: rankedDescriptors.rangeID(), + range2Span: rankedDescriptors.span(), + }) + } + // We want to advance previous range details only when new range will + // advance upper bound. This is not always the case as theoretically ranges + // could be "nested" or range could be an earlier version encompassing LHS + // and RHS parts. + if prevDesc.endKey().Less(rankedDescriptors.endKey()) { + prevDesc = rankedDescriptors + } + } + if !prevDesc.endKey().Equal(roachpb.RKeyMax) { + anomalies = append(anomalies, keyspaceCoverageAnomaly{ + span: roachpb.Span{Key: roachpb.Key(prevDesc.endKey()), EndKey: roachpb.KeyMax}, + overlap: false, + range1: prevDesc.rangeID(), + range1Span: prevDesc.span(), + range2: roachpb.RangeID(0), + range2Span: roachpb.Span{Key: roachpb.KeyMax, EndKey: roachpb.KeyMax}, + }) + } + + if len(anomalies) > 0 { + return &KeyspaceCoverageError{anomalies: anomalies} + } + return nil +} + +// makeReplicaUpdateIfNeeded if candidate range can't make progress, create an +// update using preferred replica. +// Returns a replica update and a flag indicating if update needs to be +// performed. +// For replicas that can make progress return empty update and false to exclude +// range from update plan. 
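+// liveStoreIDs is the availableStoreIDs set computed by validateReplicaSets.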
+func makeReplicaUpdateIfNeeded( + ctx context.Context, p rankedReplicas, liveStoreIDs storeIDSet, +) (loqrecoverypb.ReplicaUpdate, bool) { + if p.survivor().Desc.Replicas().CanMakeProgress(func(rep roachpb.ReplicaDescriptor) bool { + _, ok := liveStoreIDs[rep.StoreID] + return ok + }) { + return loqrecoverypb.ReplicaUpdate{}, false + } + + // We want to have replicaID which is greater or equal nextReplicaID across + // all available replicas. We'll use that as a base to bump it by arbitrary + // number to avoid potential conflicts with other replicas applying + // uncommitted raft log with descriptor updates. + nextReplicaID := p.survivor().Desc.NextReplicaID + for _, r := range p[1:] { + if r.Desc.NextReplicaID > nextReplicaID { + nextReplicaID = r.Desc.NextReplicaID + } + } + + replica, err := p.survivor().Replica() + if err != nil { + // We don't expect invalid replicas reaching this stage because we will err + // out on earlier stages. This is covered by invalid input tests and if we + // ended up here that means tests are not run, or code changed sufficiently + // and both checks and tests were lost. + log.Fatalf(ctx, "unexpected invalid replica info while making recovery plan, "+ + "we should never have unvalidated descriptors at planning stage, they must be detected "+ + "while performing keyspace coverage check: %s", err) + } + + // The range needs to be recovered and this replica is a designated survivor. + // To recover the range rewrite it as having a single replica: + // - Rewrite the replicas list. + // - Bump the replica ID so that in case there are other surviving nodes that + // were members of the old incarnation of the range, they no longer + // recognize this revived replica (because they are not in sync with it). + return loqrecoverypb.ReplicaUpdate{ + RangeID: p.rangeID(), + StartKey: loqrecoverypb.RecoveryKey(p.startKey()), + OldReplicaID: replica.ReplicaID, + NewReplica: roachpb.ReplicaDescriptor{ + NodeID: p.nodeID(), + StoreID: p.storeID(), + ReplicaID: nextReplicaID + nextReplicaIDIncrement, + }, + NextReplicaID: nextReplicaID + nextReplicaIDIncrement + 1, + }, true +} + +// makeReplicaUpdateReport creates a detailed report of changes that needs to +// be performed on range. It uses decision as well as information about all +// replicas of range to provide information about what is being discarded and +// how new replica would be configured. +func makeReplicaUpdateReport( + ctx context.Context, p rankedReplicas, update loqrecoverypb.ReplicaUpdate, +) ReplicaUpdateReport { + oldReplica, err := p.survivor().Replica() + if err != nil { + // We don't expect invalid replicas reaching this stage because we will err + // out on earlier stages. This is covered by invalid input tests and if we + // ended up here that means tests are not run, or code changed sufficiently + // and both checks and tests were lost. + log.Fatalf(ctx, "unexpected invalid replica info while making recovery plan: %s", err) + } + + // Replicas that belonged to unavailable nodes based on surviving range + // descriptor. + discardedDead := p.survivor().Desc.Replicas() + discardedDead.RemoveReplica(update.NodeID(), update.StoreID()) + // Replicas that we collected info about for the range, but decided they are + // not preferred choice. 
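+	// They are also removed from discardedDead below so that every replica lands in
+	// exactly one bucket of the report.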
+ discardedAvailable := roachpb.ReplicaSet{} + for _, replica := range p[1:] { + discardedDead.RemoveReplica(replica.NodeID, replica.StoreID) + r, _ := replica.Desc.GetReplicaDescriptor(replica.StoreID) + discardedAvailable.AddReplica(r) + } + + return ReplicaUpdateReport{ + RangeID: p.rangeID(), + StartKey: p.startKey(), + OldReplica: oldReplica, + Replica: update.NewReplica, + StoreID: p.storeID(), + DiscardedDeadReplicas: discardedDead, + DiscardedAvailableReplicas: discardedAvailable, + } +} diff --git a/pkg/kv/kvserver/loqrecovery/recovery_env_test.go b/pkg/kv/kvserver/loqrecovery/recovery_env_test.go index f31d13a05661..9508724727f1 100644 --- a/pkg/kv/kvserver/loqrecovery/recovery_env_test.go +++ b/pkg/kv/kvserver/loqrecovery/recovery_env_test.go @@ -12,8 +12,10 @@ package loqrecovery import ( "context" + "fmt" "sort" "strconv" + "strings" "testing" "time" @@ -29,12 +31,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/keysutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/datadriven" + "github.com/cockroachdb/errors" "go.etcd.io/etcd/raft/v3/raftpb" "gopkg.in/yaml.v2" ) -// Range info used for test data to avoid providing unnecessary fields that are not used in -// replica removal. +// Range info used for test data to avoid providing unnecessary fields that are +// not used in replica removal. type testReplicaInfo struct { // Replica location. NodeID roachpb.NodeID `yaml:"NodeID"` @@ -129,7 +132,14 @@ func (e *quorumRecoveryEnv) Handle(t *testing.T, d datadriven.TestData) string { t.Fatalf("%s: unknown command %s", d.Pos, d.Cmd) } if err != nil { - return err.Error() + // This is a special case of error. Coverage errors provide properly + // formatted report as a separate function to better separate processing + // from presentation. 
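+		// errors.GetAllDetails collects the detail strings attached to the error chain,
+		// which for a keyspace coverage error presumably carry the per-anomaly report
+		// rendered below.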
+ details := errors.GetAllDetails(err) + if len(details) > 0 { + return fmt.Sprintf("ERROR: %s", strings.Join(details, "\n")) + } + return fmt.Sprintf("ERROR: %s", err.Error()) } if len(out) > 0 { return out @@ -160,7 +170,8 @@ func (e *quorumRecoveryEnv) handleReplicationData(t *testing.T, d datadriven.Tes key, desc, replicaState, hardState := buildReplicaDescriptorFromTestData(t, replica) eng := e.getOrCreateStore(ctx, t, replica.StoreID, replica.NodeID) - if err = storage.MVCCPutProto(ctx, eng, nil, key, clock.Now(), nil /* txn */, &desc); err != nil { + if err = storage.MVCCPutProto(ctx, eng, nil, key, clock.Now(), nil, /* txn */ + &desc); err != nil { t.Fatalf("failed to write range descriptor into store: %v", err) } @@ -209,14 +220,10 @@ func buildReplicaDescriptorFromTestData( DeprecatedGenerationComparable: nil, StickyBit: nil, } - localReplica, ok := desc.GetReplicaDescriptor(replica.StoreID) - if !ok { - t.Fatalf("invalid test data descriptor on replica doesn't contain itself") - } lease := roachpb.Lease{ Start: clock.Now().Add(5*time.Minute.Nanoseconds(), 0).UnsafeToClockTimestamp(), Expiration: nil, - Replica: localReplica, + Replica: desc.InternalReplicas[0], ProposedTS: nil, Epoch: 0, Sequence: 0, @@ -284,7 +291,8 @@ func (e *quorumRecoveryEnv) getOrCreateStore( StoreID: storeID, } if err = storage.MVCCPutProto( - context.Background(), eng, nil, keys.StoreIdentKey(), hlc.Timestamp{}, nil, &sIdent); err != nil { + context.Background(), eng, nil, keys.StoreIdentKey(), hlc.Timestamp{}, nil, + &sIdent); err != nil { t.Fatalf("failed to populate test store ident: %v", err) } wrapped.engine = eng @@ -316,9 +324,10 @@ func (e *quorumRecoveryEnv) groupStoresByNode( t *testing.T, storeIDs []roachpb.StoreID, ) map[roachpb.NodeID][]storage.Engine { nodes := make(map[roachpb.NodeID][]storage.Engine) - iterateSelectedStores(t, storeIDs, e.stores, func(store storage.Engine, nodeID roachpb.NodeID, storeID roachpb.StoreID) { - nodes[nodeID] = append(nodes[nodeID], store) - }) + iterateSelectedStores(t, storeIDs, e.stores, + func(store storage.Engine, nodeID roachpb.NodeID, storeID roachpb.StoreID) { + nodes[nodeID] = append(nodes[nodeID], store) + }) return nodes } @@ -326,14 +335,15 @@ func (e *quorumRecoveryEnv) groupStoresByNodeStore( t *testing.T, storeIDs []roachpb.StoreID, ) map[roachpb.NodeID]map[roachpb.StoreID]storage.Batch { nodes := make(map[roachpb.NodeID]map[roachpb.StoreID]storage.Batch) - iterateSelectedStores(t, storeIDs, e.stores, func(store storage.Engine, nodeID roachpb.NodeID, storeID roachpb.StoreID) { - nodeStores, ok := nodes[nodeID] - if !ok { - nodeStores = make(map[roachpb.StoreID]storage.Batch) - nodes[nodeID] = nodeStores - } - nodeStores[storeID] = store.NewBatch() - }) + iterateSelectedStores(t, storeIDs, e.stores, + func(store storage.Engine, nodeID roachpb.NodeID, storeID roachpb.StoreID) { + nodeStores, ok := nodes[nodeID] + if !ok { + nodeStores = make(map[roachpb.StoreID]storage.Batch) + nodes[nodeID] = nodeStores + } + nodeStores[storeID] = store.NewBatch() + }) return nodes } @@ -390,10 +400,11 @@ func (e *quorumRecoveryEnv) handleDumpStore(t *testing.T, d datadriven.TestData) for _, storeID := range stores { var descriptorViews []storeDescriptorView store := e.stores[storeID] - err := kvserver.IterateRangeDescriptorsFromDisk(ctx, store.engine, func(desc roachpb.RangeDescriptor) error { - descriptorViews = append(descriptorViews, descriptorView(desc)) - return nil - }) + err := kvserver.IterateRangeDescriptorsFromDisk(ctx, store.engine, + func(desc 
roachpb.RangeDescriptor) error { + descriptorViews = append(descriptorViews, descriptorView(desc)) + return nil + }) if err != nil { t.Fatalf("failed to make a dump of store replica data: %v", err) } diff --git a/pkg/kv/kvserver/loqrecovery/testdata/invalid_input b/pkg/kv/kvserver/loqrecovery/testdata/invalid_input new file mode 100644 index 000000000000..cffbf0841e47 --- /dev/null +++ b/pkg/kv/kvserver/loqrecovery/testdata/invalid_input @@ -0,0 +1,23 @@ +# Test verifies that if we have replica with incorrect descriptor that doesn't contain its own store replica, +# we detect that and don't produce bad results or crash. +replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Min + EndKey: /Max + Replicas: # this replica is bad, it doesn't contain itself in the replicas set + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + - { NodeID: 4, StoreID: 4, ReplicaID: 1} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +---- +ok + +collect-replica-info stores=(1) +---- +ok + +make-plan +---- +ERROR: invalid replica info: its own store s1 is not present in descriptor replicas r1:/M{in-ax} [(n2,s2):2, (n3,s3):3, (n4,s4):1, next=4, gen=3] diff --git a/pkg/kv/kvserver/loqrecovery/testdata/keyspace_coverage b/pkg/kv/kvserver/loqrecovery/testdata/keyspace_coverage new file mode 100644 index 000000000000..b75513b05cda --- /dev/null +++ b/pkg/kv/kvserver/loqrecovery/testdata/keyspace_coverage @@ -0,0 +1,194 @@ +# Tests verifying that gaps between range spans or overlaps of range spans block recovery. + + +# Check that ranges with perfectly matching spans are correctly detected as an overlap +# even if they are not adjacent with respect of range ids. +replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Min # first range for the [/Min-/Table/3) keyspan + EndKey: /Table/3 + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 1 + RangeID: 2 + StartKey: /Table/3 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 2 + RangeID: 3 + StartKey: /Min + EndKey: /Table/3 # second (conflicting) range for the [/Min-/Table/3) keyspan + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + RangeAppliedIndex: 20 + RaftCommittedIndex: 20 +---- +ok + +collect-replica-info stores=(1,2) +---- +ok + +make-plan +---- +ERROR: Key space covering is not complete. Discovered following inconsistencies: +range overlap /{Min-Table/3} + r1: /{Min-Table/3} + r3: /{Min-Table/3} + + +# Check range gap where range 2 is missing leaving a hole between ranges 1 and 3. 
+replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Min + EndKey: /Table/3 # first range ends short of the second one leaving a missing [Table/3, Table/4) + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 1 + RangeID: 3 + StartKey: /Table/4 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +---- +ok + +collect-replica-info stores=(1) +---- +ok + +make-plan +---- +ERROR: Key space covering is not complete. Discovered following inconsistencies: +range gap /Table/{3-4} + r1: /{Min-Table/3} + r3: /{Table/4-Max} + +# Check range overlap with stale range in a way of newly split ones. +replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Min # range covers the full range which was split, but node has some stale data + EndKey: /Table/10 + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 4, StoreID: 4, ReplicaID: 2} + - { NodeID: 5, StoreID: 5, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 1 + RangeID: 10 + StartKey: /Table/10 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 4, StoreID: 4, ReplicaID: 2} + - { NodeID: 5, StoreID: 5, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 2 + RangeID: 3 # newer range that covers part of it parent range + StartKey: /Table/1 + EndKey: /Table/3 + Replicas: + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 6, StoreID: 6, ReplicaID: 3} + - { NodeID: 7, StoreID: 7, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 2 + RangeID: 4 + StartKey: /Table/3 + EndKey: /Table/10 # newer range that covers part of it parent range + Replicas: + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 6, StoreID: 6, ReplicaID: 3} + - { NodeID: 7, StoreID: 7, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +---- +ok + +collect-replica-info stores=(1,2) +---- +ok + +make-plan +---- +ERROR: Key space covering is not complete. Discovered following inconsistencies: +range overlap /Table/{1-3} + r1: /{Min-Table/10} + r3: /Table/{1-3} +range overlap /Table/{3-10} + r1: /{Min-Table/10} + r4: /Table/{3-10} + + +# Check that gaps at the start and end of keyspace are detected and reported correctly. +# For this we will create range that start long of min and short of max. +replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Table/1 # range starts in the middle of keyspace + EndKey: /Table/99 # and ends short of Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 4, StoreID: 4, ReplicaID: 2} + - { NodeID: 5, StoreID: 5, ReplicaID: 3} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +---- +ok + +collect-replica-info stores=(1) +---- +ok + +make-plan +---- +ERROR: Key space covering is not complete. Discovered following inconsistencies: +range gap /{Min-Table/1} + r0: /Min + r1: /Table/{1-99} +range gap /{Table/99-Max} + r1: /Table/{1-99} + r0: /Max{-} + + +# Check that empty replica set will be correctly handled as a non covering keyset. +replication-data +---- +ok + +collect-replica-info +---- +ok + +make-plan +---- +ERROR: Key space covering is not complete. 
Discovered following inconsistencies: +range gap /M{in-ax} + r0: /Min + r0: /Max{-} + diff --git a/pkg/kv/kvserver/loqrecovery/testdata/learners_lose b/pkg/kv/kvserver/loqrecovery/testdata/learners_lose index 159984aadf54..1c35ab5089f7 100644 --- a/pkg/kv/kvserver/loqrecovery/testdata/learners_lose +++ b/pkg/kv/kvserver/loqrecovery/testdata/learners_lose @@ -1,8 +1,8 @@ # Tests verifying that learners don't become survivors. -# First use case where we can make a right decision +# First use case where we can make a right decision. # With two out of five replicas remaining, check that learner -# is not picked as a survivor even if all other criteria are met +# is not picked as a survivor even if all other criteria are met. # Note: for replica type codes, see metadata.proto replication-data @@ -24,7 +24,7 @@ replication-data EndKey: /Max Replicas: - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 2, StoreID: 2, ReplicaID: 2, ReplicaType: 1} # learner has highest storeID but must not win + - { NodeID: 2, StoreID: 2, ReplicaID: 2, ReplicaType: 1} # learner has highest storeID but must not win - { NodeID: 3, StoreID: 3, ReplicaID: 3} - { NodeID: 4, StoreID: 4, ReplicaID: 4} - { NodeID: 5, StoreID: 5, ReplicaID: 5} @@ -73,9 +73,8 @@ dump-store stores=(1,2) - Replica: {NodeID: 4, StoreID: 4, ReplicaID: 4} - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 5} -# Second use case where we can't make a decision and leave replica as is -# Only a single learner is left, there is no way to recover. - +# Second use case where we can't make a decision and fail keyspace coverage as +# only a single learner is left, there is no way to recover. replication-data - StoreID: 1 RangeID: 1 @@ -116,35 +115,7 @@ ok make-plan ---- -[] - -apply-plan stores=(1,2) ----- -ok - -dump-store stores=(1,2) ----- -- NodeID: 1 - StoreID: 1 - Descriptors: - - RangeID: 1 - StartKey: /Min - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1, ReplicaType: 1} - - Replica: {NodeID: 3, StoreID: 3, ReplicaID: 2} - - Replica: {NodeID: 4, StoreID: 4, ReplicaID: 3} - - RangeID: 2 - StartKey: /Table/1 - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2} - - Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3} -- NodeID: 2 - StoreID: 2 - Descriptors: - - RangeID: 2 - StartKey: /Table/1 - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2} - - Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3} +ERROR: Key space covering is not complete. Discovered following inconsistencies: +range gap /{Min-Table/1} + r0: /Min + r2: /{Table/1-Max} diff --git a/pkg/kv/kvserver/loqrecovery/testdata/max_applied_voter_wins b/pkg/kv/kvserver/loqrecovery/testdata/max_applied_voter_wins new file mode 100644 index 000000000000..84b9d4899b4b --- /dev/null +++ b/pkg/kv/kvserver/loqrecovery/testdata/max_applied_voter_wins @@ -0,0 +1,221 @@ +# With two out of five replicas remaining, check that replica with highest +# range applied index is chosen regardless of replica storeID. +# We have a 5-way replication and have two out of five nodes left so quorum +# is lost. 
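The "quorum is lost" condition this file exercises comes from makeReplicaUpdateIfNeeded above: a replica update is only emitted when the replicas sitting on live stores can no longer make progress. Below is a minimal, self-contained sketch of that check, assuming a plain-majority quorum; the real planner uses the descriptor's CanMakeProgress, which also understands joint configurations and non-voter replicas.

```go
package main

import "fmt"

// replica is a simplified stand-in for a replica descriptor; the real code
// works with roachpb.ReplicaDescriptor.
type replica struct{ storeID int }

// canMakeProgress approximates the check in makeReplicaUpdateIfNeeded: a range
// only needs recovery when the replicas on live stores cannot form a quorum.
func canMakeProgress(replicas []replica, liveStores map[int]struct{}) bool {
	live := 0
	for _, r := range replicas {
		if _, ok := liveStores[r.storeID]; ok {
			live++
		}
	}
	return live*2 > len(replicas)
}

func main() {
	// The scenario in this file: a 5-way replicated range with only s1 and s2 left.
	replicas := []replica{{1}, {2}, {3}, {4}, {5}}
	liveStores := map[int]struct{}{1: {}, 2: {}}
	fmt.Println(canMakeProgress(replicas, liveStores)) // false: quorum lost, so a recovery plan is produced
}
```
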
+ +replication-data +- StoreID: 1 + RangeID: 1 + StartKey: /Min + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} # replicas 3-5 are located on unavailable stores + - { NodeID: 4, StoreID: 4, ReplicaID: 4} + - { NodeID: 5, StoreID: 5, ReplicaID: 5} + RangeAppliedIndex: 11 # this replica has higher applied index so is preferred over the other one + RaftCommittedIndex: 13 +- StoreID: 2 + RangeID: 1 + StartKey: /Min + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} # replicas 3-5 are located on unavailable stores + - { NodeID: 4, StoreID: 4, ReplicaID: 4} + - { NodeID: 5, StoreID: 5, ReplicaID: 5} + RangeAppliedIndex: 10 # applied index takes precedence over store ID so this replica loses + RaftCommittedIndex: 14 # committed index while higher, should not confuse planner and use applied index +---- +ok + +collect-replica-info stores=(1,2) +---- +ok + +make-plan +---- +- RangeID: 1 + StartKey: /Min + OldReplicaID: 1 + NewReplica: + NodeID: 1 + StoreID: 1 + ReplicaID: 16 + NextReplicaID: 17 + +apply-plan stores=(1,2) +---- +ok + +dump-store stores=(1,2) +---- +- NodeID: 1 + StoreID: 1 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 16} +- NodeID: 2 + StoreID: 2 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2} + - Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3} + - Replica: {NodeID: 4, StoreID: 4, ReplicaID: 4} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 5} + + +# Second use case where stale replica which remained from before split +# on store with higher ID will conflict with later one spanning smaller range. +# We have a stale replica in s2 which still remembers group across s3 and s4 +# but they are not available anymore. While LHS and RHS across s1, s4, s5 are +# now more recent. Stale replica loses based on raft applied index being lower. 
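The cases in this file pin down the ordering used to pick a survivor: the highest applied index wins, and the store ID only breaks ties (testdata/max_store_voter_wins covers the tie case). The following is only an illustrative sketch of that ordering with a hypothetical comparator; the planner's actual rankedReplicas sort may apply further criteria, such as excluding learners (see testdata/learners_lose).

```go
package main

import (
	"fmt"
	"sort"
)

// candidate is a hypothetical, trimmed-down view of a collected replica.
type candidate struct {
	storeID      int
	appliedIndex uint64
}

// rankSurvivors sorts candidates so the preferred survivor comes first: the
// highest applied index wins, and only ties are broken by the higher store ID.
func rankSurvivors(cs []candidate) {
	sort.Slice(cs, func(i, j int) bool {
		if cs[i].appliedIndex != cs[j].appliedIndex {
			return cs[i].appliedIndex > cs[j].appliedIndex
		}
		return cs[i].storeID > cs[j].storeID
	})
}

func main() {
	// The first case in this file: s1 has applied index 11, s2 only 10.
	cs := []candidate{{storeID: 2, appliedIndex: 10}, {storeID: 1, appliedIndex: 11}}
	rankSurvivors(cs)
	fmt.Println(cs[0].storeID) // 1: wins on applied index despite the lower store ID
}
```
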
+replication-data +- StoreID: 1 # this is the LHS replica post split + RangeID: 1 + StartKey: /Min + EndKey: /Table/1 + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +- StoreID: 1 # this is the RHS replica post split + RangeID: 2 + StartKey: /Table/1 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +- StoreID: 2 + RangeID: 1 # this is the old version of range which got stale + StartKey: /Min + EndKey: /Max + Replicas: + - { NodeID: 2, StoreID: 2, ReplicaID: 2} + - { NodeID: 3, StoreID: 3, ReplicaID: 3} + - { NodeID: 4, StoreID: 4, ReplicaID: 4} + RangeAppliedIndex: 10 + RaftCommittedIndex: 13 +- StoreID: 5 + RangeID: 1 # this is the LHS replica post split + StartKey: /Min + EndKey: /Table/1 + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +- StoreID: 5 + RangeID: 2 # this is the RHS replica post split + StartKey: /Table/1 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +- StoreID: 6 + RangeID: 1 # this is the LHS replica post split + StartKey: /Min + EndKey: /Table/1 + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +- StoreID: 6 + RangeID: 2 # this is the RHS replica post split + StartKey: /Table/1 + EndKey: /Max + Replicas: + - { NodeID: 1, StoreID: 1, ReplicaID: 1} + - { NodeID: 5, StoreID: 5, ReplicaID: 6} + - { NodeID: 6, StoreID: 6, ReplicaID: 7} + RangeAppliedIndex: 15 + RaftCommittedIndex: 15 +---- +ok + +collect-replica-info stores=(1,2,5,6) +---- +ok + +make-plan +---- +[] + +apply-plan stores=(1,2,5,6) +---- +ok + +dump-store stores=(1,2,5,6) +---- +- NodeID: 1 + StoreID: 1 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} + - RangeID: 2 + StartKey: /Table/1 + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} +- NodeID: 2 + StoreID: 2 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2} + - Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3} + - Replica: {NodeID: 4, StoreID: 4, ReplicaID: 4} +- NodeID: 5 + StoreID: 5 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} + - RangeID: 2 + StartKey: /Table/1 + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} +- NodeID: 6 + StoreID: 6 + Descriptors: + - RangeID: 1 + StartKey: /Min + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, 
StoreID: 6, ReplicaID: 7} + - RangeID: 2 + StartKey: /Table/1 + Replicas: + - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} + - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} + - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} diff --git a/pkg/kv/kvserver/loqrecovery/testdata/max_store_voter_wins b/pkg/kv/kvserver/loqrecovery/testdata/max_store_voter_wins index bd2b7b065cf2..d3ca49ad2bdd 100644 --- a/pkg/kv/kvserver/loqrecovery/testdata/max_store_voter_wins +++ b/pkg/kv/kvserver/loqrecovery/testdata/max_store_voter_wins @@ -1,9 +1,8 @@ -# Test verifying that voter with max StoreID would be designated survivor. +# Tests verifying that voter with max StoreID would be designated survivor. -# First use case where we can successfully resolve replica by store ID +# First use case where we can successfully resolve replica by store ID. # With two out of five replicas remaining, check that replica with highest # store ID is chosen as a survivor. -# Note: for replica type codes, see metadata.proto replication-data - StoreID: 1 @@ -11,7 +10,7 @@ replication-data StartKey: /Min EndKey: /Max Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} # This replica is identical to one in store 2 but has lower storeID 1 + - { NodeID: 1, StoreID: 1, ReplicaID: 1} # this replica is identical to one in store 2 but has lower storeID 1 - { NodeID: 2, StoreID: 2, ReplicaID: 2} - { NodeID: 3, StoreID: 3, ReplicaID: 3} - { NodeID: 4, StoreID: 4, ReplicaID: 4} @@ -24,7 +23,7 @@ replication-data EndKey: /Max Replicas: - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 2, StoreID: 2, ReplicaID: 2} # This replica has the same state n1 but has higher storeID so it wins + - { NodeID: 2, StoreID: 2, ReplicaID: 2} # this replica has the same state n1 but has higher storeID so it wins - { NodeID: 3, StoreID: 3, ReplicaID: 3} - { NodeID: 4, StoreID: 4, ReplicaID: 4} - { NodeID: 5, StoreID: 5, ReplicaID: 5} @@ -72,157 +71,3 @@ dump-store stores=(1,2) StartKey: /Min Replicas: - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 16} - -# Second use case where stale replica which remained from before split -# on store with higher ID will conflict with later one spanning smaller range. -# We have a stale replica in s2 which still remembers group across s3 and s4 -# which are not available anymore. While LHS and RHS across s1, s4, s5 are now -# more recent. But they can't win against old version as we don't analyze enough -# info. 
-replication-data -- StoreID: 1 # This is a LHS post split - RangeID: 1 - StartKey: /Min - EndKey: /Table/1 - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 -- StoreID: 1 # This is RHS replica post split - RangeID: 2 - StartKey: /Table/1 - EndKey: /Max - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 -- StoreID: 2 - RangeID: 1 # This is an old version of range which got lost - StartKey: /Min - EndKey: /Max - Replicas: - - { NodeID: 2, StoreID: 2, ReplicaID: 2} - - { NodeID: 3, StoreID: 3, ReplicaID: 3} - - { NodeID: 4, StoreID: 4, ReplicaID: 4} - RangeAppliedIndex: 10 - RaftCommittedIndex: 13 -- StoreID: 5 - RangeID: 1 # This is a LHS post split - StartKey: /Min - EndKey: /Table/1 - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 -- StoreID: 5 - RangeID: 2 # This is RHS replica post split - StartKey: /Table/1 - EndKey: /Max - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 -- StoreID: 6 - RangeID: 1 # This is a LHS post split - StartKey: /Min - EndKey: /Table/1 - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 -- StoreID: 6 - RangeID: 2 # This is RHS replica post split - StartKey: /Table/1 - EndKey: /Max - Replicas: - - { NodeID: 1, StoreID: 1, ReplicaID: 1} - - { NodeID: 5, StoreID: 5, ReplicaID: 6} - - { NodeID: 6, StoreID: 6, ReplicaID: 7} - RangeAppliedIndex: 15 - RaftCommittedIndex: 15 ----- -ok - -collect-replica-info stores=(1,2,5,6) ----- -ok - -make-plan ----- -- RangeID: 1 - StartKey: /Min - OldReplicaID: 2 - NewReplica: - NodeID: 2 - StoreID: 2 - ReplicaID: 15 - NextReplicaID: 16 - -apply-plan stores=(1,2,5,6) ----- -ok - -dump-store stores=(1,2,5,6) ----- -- NodeID: 1 - StoreID: 1 - Descriptors: - - RangeID: 1 - StartKey: /Min - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} - - RangeID: 2 - StartKey: /Table/1 - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} -- NodeID: 2 - StoreID: 2 - Descriptors: - - RangeID: 1 - StartKey: /Min - Replicas: - - Replica: {NodeID: 2, StoreID: 2, ReplicaID: 15} -- NodeID: 5 - StoreID: 5 - Descriptors: - - RangeID: 1 - StartKey: /Min - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} - - RangeID: 2 - StartKey: /Table/1 - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} -- NodeID: 6 - StoreID: 6 - Descriptors: - - RangeID: 1 - StartKey: /Min - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} - - 
RangeID: 2 - StartKey: /Table/1 - Replicas: - - Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1} - - Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6} - - Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7} diff --git a/pkg/kv/kvserver/loqrecovery/utils.go b/pkg/kv/kvserver/loqrecovery/utils.go index 47f449ff634d..ac9ca119b245 100644 --- a/pkg/kv/kvserver/loqrecovery/utils.go +++ b/pkg/kv/kvserver/loqrecovery/utils.go @@ -20,8 +20,8 @@ import ( type storeIDSet map[roachpb.StoreID]struct{} -// storeListFromSet unwraps map to a sorted list of StoreIDs. -func storeListFromSet(set storeIDSet) []roachpb.StoreID { +// storeSliceFromSet unwraps map to a sorted list of StoreIDs. +func storeSliceFromSet(set storeIDSet) []roachpb.StoreID { storeIDs := make([]roachpb.StoreID, 0, len(set)) for k := range set { storeIDs = append(storeIDs, k) @@ -35,8 +35,70 @@ func storeListFromSet(set storeIDSet) []roachpb.StoreID { // Make a string of stores 'set' in ascending order. func joinStoreIDs(storeIDs storeIDSet) string { storeNames := make([]string, 0, len(storeIDs)) - for _, id := range storeListFromSet(storeIDs) { + for _, id := range storeSliceFromSet(storeIDs) { storeNames = append(storeNames, fmt.Sprintf("s%d", id)) } return strings.Join(storeNames, ", ") } + +func keyMax(key1 roachpb.RKey, key2 roachpb.RKey) roachpb.RKey { + if key1.Less(key2) { + return key2 + } + return key1 +} + +func keyMin(key1 roachpb.RKey, key2 roachpb.RKey) roachpb.RKey { + if key2.Less(key1) { + return key2 + } + return key1 +} + +// keyspaceCoverageAnomaly records errors found when checking keyspace coverage. +// Anomaly is a key span where there's no coverage or there are more than one +// range that covers the span. +// Anomaly also contains additional information about ranges that either +// bordering the gap or overlap over the anomaly span. +type keyspaceCoverageAnomaly struct { + span roachpb.Span + overlap bool + + range1 roachpb.RangeID + range1Span roachpb.Span + + range2 roachpb.RangeID + range2Span roachpb.Span +} + +func (i keyspaceCoverageAnomaly) String() string { + if i.overlap { + return fmt.Sprintf("range overlap %v\n r%d: %v\n r%d: %v", + i.span, i.range1, i.range1Span, i.range2, i.range2Span) + } + return fmt.Sprintf("range gap %v\n r%d: %v\n r%d: %v", + i.span, i.range1, i.range1Span, i.range2, i.range2Span) +} + +// KeyspaceCoverageError is returned by replica planner when it detects problems +// with key coverage. Error contains all anomalies found. It also provides a +// convenience function to print report. +type KeyspaceCoverageError struct { + anomalies []keyspaceCoverageAnomaly +} + +func (e *KeyspaceCoverageError) Error() string { + return "keyspace coverage error" +} + +// ErrorDetail returns a properly formatted report that could be presented +// to user. +func (e *KeyspaceCoverageError) ErrorDetail() string { + descriptions := make([]string, 0, len(e.anomalies)) + for _, id := range e.anomalies { + descriptions = append(descriptions, fmt.Sprintf("%v", id)) + } + return fmt.Sprintf( + "Key space covering is not complete. Discovered following inconsistencies:\n%s\n", + strings.Join(descriptions, "\n")) +} diff --git a/pkg/kv/kvserver/mvcc_gc_queue_test.go b/pkg/kv/kvserver/mvcc_gc_queue_test.go index 6fba02ecc8a0..cebee5decc89 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue_test.go +++ b/pkg/kv/kvserver/mvcc_gc_queue_test.go @@ -1001,7 +1001,7 @@ func TestMVCCGCQueueIntentResolution(t *testing.T) { } // Process through GC queue. 
- confReader, err := tc.store.GetConfReader() + confReader, err := tc.store.GetConfReader(ctx) if err != nil { t.Fatal(err) } @@ -1062,7 +1062,7 @@ func TestMVCCGCQueueLastProcessedTimestamps(t *testing.T) { } } - confReader, err := tc.store.GetConfReader() + confReader, err := tc.store.GetConfReader(ctx) if err != nil { t.Fatal(err) } @@ -1167,7 +1167,7 @@ func TestMVCCGCQueueChunkRequests(t *testing.T) { } // Forward the clock past the default GC time. - confReader, err := tc.store.GetConfReader() + confReader, err := tc.store.GetConfReader(ctx) if err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/queue.go b/pkg/kv/kvserver/queue.go index 0a9f7c737b1d..40034f86a987 100644 --- a/pkg/kv/kvserver/queue.go +++ b/pkg/kv/kvserver/queue.go @@ -611,7 +611,7 @@ func (bq *baseQueue) maybeAdd(ctx context.Context, repl replicaInQueue, now hlc. var confReader spanconfig.StoreReader if bq.needsSystemConfig { var err error - confReader, err = bq.store.GetConfReader() + confReader, err = bq.store.GetConfReader(ctx) if err != nil { if errors.Is(err, errSysCfgUnavailable) && log.V(1) { log.Warningf(ctx, "unable to retrieve system config, skipping: %v", err) @@ -901,7 +901,7 @@ func (bq *baseQueue) processReplica(ctx context.Context, repl replicaInQueue) er var confReader spanconfig.StoreReader if bq.needsSystemConfig { var err error - confReader, err = bq.store.GetConfReader() + confReader, err = bq.store.GetConfReader(ctx) if errors.Is(err, errSysCfgUnavailable) { if log.V(1) { log.Warningf(ctx, "unable to retrieve conf reader, skipping: %v", err) diff --git a/pkg/kv/kvserver/queue_helpers_testutil.go b/pkg/kv/kvserver/queue_helpers_testutil.go index ca232ddccfec..2f469cf95fc2 100644 --- a/pkg/kv/kvserver/queue_helpers_testutil.go +++ b/pkg/kv/kvserver/queue_helpers_testutil.go @@ -26,11 +26,11 @@ func (bq *baseQueue) testingAdd( return bq.addInternal(ctx, repl.Desc(), repl.ReplicaID(), priority) } -func forceScanAndProcess(s *Store, q *baseQueue) error { +func forceScanAndProcess(ctx context.Context, s *Store, q *baseQueue) error { // Check that the system config is available. It is needed by many queues. If // it's not available, some queues silently fail to process any replicas, // which is undesirable for this method. - if _, err := s.GetConfReader(); err != nil { + if _, err := s.GetConfReader(ctx); err != nil { return errors.Wrap(err, "unable to retrieve conf reader") } @@ -44,7 +44,7 @@ func forceScanAndProcess(s *Store, q *baseQueue) error { } func mustForceScanAndProcess(ctx context.Context, s *Store, q *baseQueue) { - if err := forceScanAndProcess(s, q); err != nil { + if err := forceScanAndProcess(ctx, s, q); err != nil { log.Fatalf(ctx, "%v", err) } } @@ -52,7 +52,7 @@ func mustForceScanAndProcess(ctx context.Context, s *Store, q *baseQueue) { // ForceReplicationScanAndProcess iterates over all ranges and // enqueues any that need to be replicated. func (s *Store) ForceReplicationScanAndProcess() error { - return forceScanAndProcess(s, s.replicateQueue.baseQueue) + return forceScanAndProcess(context.TODO(), s, s.replicateQueue.baseQueue) } // MustForceReplicaGCScanAndProcess iterates over all ranges and enqueues any that @@ -70,7 +70,7 @@ func (s *Store) MustForceMergeScanAndProcess() { // ForceSplitScanAndProcess iterates over all ranges and enqueues any that // may need to be split. 
func (s *Store) ForceSplitScanAndProcess() error { - return forceScanAndProcess(s, s.splitQueue.baseQueue) + return forceScanAndProcess(context.TODO(), s, s.splitQueue.baseQueue) } // MustForceRaftLogScanAndProcess iterates over all ranges and enqueues any that @@ -83,20 +83,20 @@ func (s *Store) MustForceRaftLogScanAndProcess() { // any that need time series maintenance, then processes the time series // maintenance queue. func (s *Store) ForceTimeSeriesMaintenanceQueueProcess() error { - return forceScanAndProcess(s, s.tsMaintenanceQueue.baseQueue) + return forceScanAndProcess(context.TODO(), s, s.tsMaintenanceQueue.baseQueue) } // ForceRaftSnapshotQueueProcess iterates over all ranges, enqueuing // any that need raft snapshots, then processes the raft snapshot // queue. func (s *Store) ForceRaftSnapshotQueueProcess() error { - return forceScanAndProcess(s, s.raftSnapshotQueue.baseQueue) + return forceScanAndProcess(context.TODO(), s, s.raftSnapshotQueue.baseQueue) } // ForceConsistencyQueueProcess runs all the ranges through the consistency // queue. func (s *Store) ForceConsistencyQueueProcess() error { - return forceScanAndProcess(s, s.consistencyQueue.baseQueue) + return forceScanAndProcess(context.TODO(), s, s.consistencyQueue.baseQueue) } // The methods below can be used to control a store's queues. Stopping a queue diff --git a/pkg/kv/kvserver/queue_test.go b/pkg/kv/kvserver/queue_test.go index c053453f1c31..87580ac7fcab 100644 --- a/pkg/kv/kvserver/queue_test.go +++ b/pkg/kv/kvserver/queue_test.go @@ -565,7 +565,7 @@ func TestNeedsSystemConfig(t *testing.T) { tc.StartWithStoreConfig(ctx, t, stopper, cfg) { - confReader, err := tc.store.GetConfReader() + confReader, err := tc.store.GetConfReader(ctx) require.Nil(t, confReader) require.True(t, errors.Is(err, errSysCfgUnavailable)) } diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index 141a838aff6c..16da2b78753f 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -264,6 +264,16 @@ type Replica struct { // miss out on anything. raftCtx context.Context + // breaker is a per-Replica circuit breaker. Its purpose is to avoid incurring + // large (infinite) latencies on client requests when the Replica is unable to + // serve commands. This circuit breaker does *not* recruit the occasional + // request to determine whether it is safe to heal the breaker. Instead, it + // has its own probe that is executed asynchronously and determines when the + // Replica is healthy again. + // + // See replica_circuit_breaker.go for details. + breaker *replicaCircuitBreaker + // raftMu protects Raft processing the replica. // // Locking notes: Replica.raftMu < Replica.mu @@ -279,6 +289,47 @@ type Replica struct { stateMachine replicaStateMachine // decoder is used to decode committed raft entries. decoder replicaDecoder + + // The last seen replica descriptors from incoming Raft messages. These are + // stored so that the replica still knows the replica descriptors for itself + // and for its message recipients in the circumstances when its RangeDescriptor + // is out of date. + // + // Normally, a replica knows about the other replica descriptors for a + // range via the RangeDescriptor stored in Replica.mu.state.Desc. But that + // descriptor is only updated during a Split or ChangeReplicas operation. + // There are periods during a Replica's lifetime when that information is + // out of date: + // + // 1. When a replica is being newly created as the result of an incoming + // Raft message for it. 
This is the common case for ChangeReplicas and an + // uncommon case for Splits. The leader will be sending the replica + // messages and the replica needs to be able to respond before it can + // receive an updated range descriptor (via a snapshot, + // changeReplicasTrigger, or splitTrigger). + // + // 2. If the node containing a replica is partitioned or down while the + // replicas for the range are updated. When the node comes back up, other + // replicas may begin communicating with it and it needs to be able to + // respond. Unlike 1 where there is no range descriptor, in this situation + // the replica has a range descriptor but it is out of date. Note that a + // replica being removed from a node and then quickly re-added before the + // replica has been GC'd will also use the last seen descriptors. In + // effect, this is another path for which the replica's local range + // descriptor is out of date. + // + // The last seen replica descriptors are updated on receipt of every raft + // message via Replica.setLastReplicaDescriptors (see + // Store.HandleRaftRequest). These last seen descriptors are used when + // the replica's RangeDescriptor contains missing or out of date descriptors + // for a replica (see Replica.sendRaftMessageRaftMuLocked). + // + // Removing a replica from Store.mu.replicas is not a problem because + // when a replica is completely removed, it won't be recreated until + // there is another event that will repopulate the replicas map in the + // range descriptor. When it is temporarily dropped and recreated, the + // newly recreated replica will have a complete range descriptor. + lastToReplica, lastFromReplica roachpb.ReplicaDescriptor } // Contains the lease history when enabled. @@ -499,47 +550,6 @@ type Replica struct { // live node will not lose leaseholdership. lastUpdateTimes lastUpdateTimesMap - // The last seen replica descriptors from incoming Raft messages. These are - // stored so that the replica still knows the replica descriptors for itself - // and for its message recipients in the circumstances when its RangeDescriptor - // is out of date. - // - // Normally, a replica knows about the other replica descriptors for a - // range via the RangeDescriptor stored in Replica.mu.state.Desc. But that - // descriptor is only updated during a Split or ChangeReplicas operation. - // There are periods during a Replica's lifetime when that information is - // out of date: - // - // 1. When a replica is being newly created as the result of an incoming - // Raft message for it. This is the common case for ChangeReplicas and an - // uncommon case for Splits. The leader will be sending the replica - // messages and the replica needs to be able to respond before it can - // receive an updated range descriptor (via a snapshot, - // changeReplicasTrigger, or splitTrigger). - // - // 2. If the node containing a replica is partitioned or down while the - // replicas for the range are updated. When the node comes back up, other - // replicas may begin communicating with it and it needs to be able to - // respond. Unlike 1 where there is no range descriptor, in this situation - // the replica has a range descriptor but it is out of date. Note that a - // replica being removed from a node and then quickly re-added before the - // replica has been GC'd will also use the last seen descriptors. In - // effect, this is another path for which the replica's local range - // descriptor is out of date. 
- // - // The last seen replica descriptors are updated on receipt of every raft - // message via Replica.setLastReplicaDescriptors (see - // Store.HandleRaftRequest). These last seen descriptors are used when - // the replica's RangeDescriptor contains missing or out of date descriptors - // for a replica (see Replica.sendRaftMessage). - // - // Removing a replica from Store.mu.replicas is not a problem because - // when a replica is completely removed, it won't be recreated until - // there is another event that will repopulate the replicas map in the - // range descriptor. When it is temporarily dropped and recreated, the - // newly recreated replica will have a complete range descriptor. - lastToReplica, lastFromReplica roachpb.ReplicaDescriptor - // Computed checksum at a snapshot UUID. checksums map[uuid.UUID]ReplicaChecksum @@ -940,7 +950,7 @@ func (r *Replica) GetRangeInfo(ctx context.Context) roachpb.RangeInfo { // I wish this could be a Fatal, but unfortunately it's possible for the // lease to be incoherent with the descriptor after a leaseholder was // brutally removed through `cockroach debug unsafe-remove-dead-replicas`. - log.Errorf(ctx, "leaseholder replica not in descriptor; desc: %s, lease: %s", desc, &l) + log.Errorf(ctx, "leaseholder replica not in descriptor; desc: %s, lease: %s", desc, l) // Let's not return an incoherent lease; for example if we end up // returning it to a client through a br.RangeInfos, the client will freak // out. @@ -1063,13 +1073,11 @@ func (r *Replica) mergeInProgressRLocked() bool { } // setLastReplicaDescriptors sets the most recently seen replica -// descriptors to those contained in the *RaftMessageRequest, acquiring r.mu -// to do so. -func (r *Replica) setLastReplicaDescriptors(req *RaftMessageRequest) { - r.mu.Lock() - r.mu.lastFromReplica = req.FromReplica - r.mu.lastToReplica = req.ToReplica - r.mu.Unlock() +// descriptors to those contained in the *RaftMessageRequest. +func (r *Replica) setLastReplicaDescriptorsRaftMuLocked(req *RaftMessageRequest) { + r.raftMu.AssertHeld() + r.raftMu.lastFromReplica = req.FromReplica + r.raftMu.lastToReplica = req.ToReplica } // GetMVCCStats returns a copy of the MVCC stats object for this range. @@ -1229,6 +1237,9 @@ func (r *Replica) State(ctx context.Context) kvserverpb.RangeInfo { ctx, r.RangeID, r.mu.state.Lease.Replica.NodeID) ri.ClosedTimestampSideTransportInfo.CentralClosed = centralClosed ri.ClosedTimestampSideTransportInfo.CentralLAI = centralLAI + if err := r.breaker.Signal().Err(); err != nil { + ri.CircuitBreakerError = err.Error() + } return ri } diff --git a/pkg/kv/kvserver/replica_circuit_breaker.go b/pkg/kv/kvserver/replica_circuit_breaker.go new file mode 100644 index 000000000000..fa5918d63442 --- /dev/null +++ b/pkg/kv/kvserver/replica_circuit_breaker.go @@ -0,0 +1,229 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package kvserver + +import ( + "context" + "time" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/util/circuit" + "github.com/cockroachdb/cockroach/pkg/util/contextutil" + "github.com/cockroachdb/cockroach/pkg/util/envutil" + "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" + "go.etcd.io/etcd/raft/v3" +) + +type replicaInCircuitBreaker interface { + Clock() *hlc.Clock + Desc() *roachpb.RangeDescriptor + Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + slowReplicationThreshold(ba *roachpb.BatchRequest) (time.Duration, bool) + replicaUnavailableError() error +} + +var defaultReplicaCircuitBreakerSlowReplicationThreshold = envutil.EnvOrDefaultDuration( + "COCKROACH_REPLICA_CIRCUIT_BREAKER_SLOW_REPLICATION_THRESHOLD", 0, +) + +var replicaCircuitBreakerSlowReplicationThreshold = settings.RegisterPublicDurationSettingWithExplicitUnit( + settings.SystemOnly, + "kv.replica_circuit_breaker.slow_replication_threshold", + "duration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)", + defaultReplicaCircuitBreakerSlowReplicationThreshold, + settings.NonNegativeDuration, +) + +// replicaCircuitBreaker is a wrapper around *circuit.Breaker that makes it +// convenient for use as a per-Replica circuit breaker. +type replicaCircuitBreaker struct { + ambCtx log.AmbientContext + stopper *stop.Stopper + r replicaInCircuitBreaker + st *cluster.Settings + wrapped *circuit.Breaker +} + +func (br *replicaCircuitBreaker) enabled() bool { + return replicaCircuitBreakerSlowReplicationThreshold.Get(&br.st.SV) > 0 && + br.st.Version.IsActive(context.Background(), clusterversion.ProbeRequest) +} + +func (br *replicaCircuitBreaker) newError() error { + return br.r.replicaUnavailableError() +} + +func (br *replicaCircuitBreaker) TripAsync() { + if !br.enabled() { + return + } + + _ = br.stopper.RunAsyncTask( + br.ambCtx.AnnotateCtx(context.Background()), "trip-breaker", + func(ctx context.Context) { + br.wrapped.Report(br.newError()) + }, + ) +} + +type signaller interface { + Err() error + C() <-chan struct{} +} + +type neverTripSignaller struct{} + +func (s neverTripSignaller) Err() error { return nil } +func (s neverTripSignaller) C() <-chan struct{} { return nil } + +func (br *replicaCircuitBreaker) Signal() signaller { + if !br.enabled() { + return neverTripSignaller{} + } + return br.wrapped.Signal() +} + +func newReplicaCircuitBreaker( + cs *cluster.Settings, + stopper *stop.Stopper, + ambientCtx log.AmbientContext, + r replicaInCircuitBreaker, +) *replicaCircuitBreaker { + br := &replicaCircuitBreaker{ + stopper: stopper, + ambCtx: ambientCtx, + r: r, + st: cs, + } + + br.wrapped = circuit.NewBreaker(circuit.Options{ + Name: "breaker", // log bridge has ctx tags + AsyncProbe: br.asyncProbe, + EventHandler: &circuit.EventLogger{ + Log: func(buf redact.StringBuilder) { + log.Infof(ambientCtx.AnnotateCtx(context.Background()), "%s", buf) + }, + }, + }) + + return br +} + +type probeKey struct{} + +func isCircuitBreakerProbe(ctx context.Context) bool { + return ctx.Value(probeKey{}) != nil +} + +func 
withCircuitBreakerProbeMarker(ctx context.Context) context.Context { + return context.WithValue(ctx, probeKey{}, probeKey{}) +} + +func (br *replicaCircuitBreaker) asyncProbe(report func(error), done func()) { + bgCtx := br.ambCtx.AnnotateCtx(context.Background()) + if err := br.stopper.RunAsyncTask(bgCtx, "replica-probe", func(ctx context.Context) { + defer done() + + if !br.enabled() { + report(nil) + return + } + + err := sendProbe(ctx, br.r) + report(err) + }); err != nil { + done() + } +} + +func sendProbe(ctx context.Context, r replicaInCircuitBreaker) error { + ctx = withCircuitBreakerProbeMarker(ctx) + desc := r.Desc() + if !desc.IsInitialized() { + return nil + } + ba := roachpb.BatchRequest{} + ba.Timestamp = r.Clock().Now() + ba.RangeID = r.Desc().RangeID + probeReq := &roachpb.ProbeRequest{} + probeReq.Key = desc.StartKey.AsRawKey() + ba.Add(probeReq) + thresh, ok := r.slowReplicationThreshold(&ba) + if !ok { + // Breakers are disabled now. + return nil + } + if err := contextutil.RunWithTimeout(ctx, "probe", thresh, + func(ctx context.Context) error { + _, pErr := r.Send(ctx, ba) + return pErr.GoError() + }, + ); err != nil { + return errors.CombineErrors(r.replicaUnavailableError(), err) + } + return nil +} + +func replicaUnavailableError( + desc *roachpb.RangeDescriptor, + replDesc roachpb.ReplicaDescriptor, + lm liveness.IsLiveMap, + rs *raft.Status, +) error { + nonLiveRepls := roachpb.MakeReplicaSet(nil) + for _, rDesc := range desc.Replicas().Descriptors() { + if lm[rDesc.NodeID].IsLive { + continue + } + nonLiveRepls.AddReplica(rDesc) + } + + canMakeProgress := desc.Replicas().CanMakeProgress( + func(replDesc roachpb.ReplicaDescriptor) bool { + return lm[replDesc.NodeID].IsLive + }, + ) + + // Ensure good redaction. + var _ redact.SafeFormatter = nonLiveRepls + var _ redact.SafeFormatter = desc + var _ redact.SafeFormatter = replDesc + + err := errors.Errorf("replica %s of %s is unavailable", desc, replDesc) + err = errors.Wrapf( + err, + "raft status: %+v", redact.Safe(rs), // raft status contains no PII + ) + if len(nonLiveRepls.AsProto()) > 0 { + err = errors.Wrapf(err, "replicas on non-live nodes: %v (lost quorum: %t)", nonLiveRepls, !canMakeProgress) + } + + return err +} + +func (r *Replica) replicaUnavailableError() error { + desc := r.Desc() + replDesc, _ := desc.GetReplicaDescriptor(r.store.StoreID()) + + var isLiveMap liveness.IsLiveMap + if nl := r.store.cfg.NodeLiveness; nl != nil { // exclude unit test + isLiveMap = nl.GetIsLiveMap() + } + return replicaUnavailableError(desc, replDesc, isLiveMap, r.RaftStatus()) +} diff --git a/pkg/kv/kvserver/replica_circuit_breaker_test.go b/pkg/kv/kvserver/replica_circuit_breaker_test.go new file mode 100644 index 000000000000..79426fb0a1da --- /dev/null +++ b/pkg/kv/kvserver/replica_circuit_breaker_test.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package kvserver + +import ( + "path/filepath" + "testing" + + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/testutils/echotest" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/redact" + "go.etcd.io/etcd/raft/v3" +) + +func TestReplicaUnavailableError(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + var repls roachpb.ReplicaSet + repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 10, ReplicaID: 100}) + repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 2, StoreID: 20, ReplicaID: 200}) + desc := roachpb.NewRangeDescriptor(10, roachpb.RKey("a"), roachpb.RKey("z"), repls) + var ba roachpb.BatchRequest + ba.Add(&roachpb.RequestLeaseRequest{}) + lm := liveness.IsLiveMap{ + 1: liveness.IsLiveMapEntry{IsLive: true}, + } + rs := raft.Status{} + err := replicaUnavailableError(desc, desc.Replicas().AsProto()[0], lm, &rs) + echotest.Require(t, string(redact.Sprint(err)), filepath.Join("testdata", "replica_unavailable_error.txt")) +} diff --git a/pkg/kv/kvserver/replica_closedts_test.go b/pkg/kv/kvserver/replica_closedts_test.go index 42eedf404e65..67261059102e 100644 --- a/pkg/kv/kvserver/replica_closedts_test.go +++ b/pkg/kv/kvserver/replica_closedts_test.go @@ -628,7 +628,7 @@ func TestRejectedLeaseDoesntDictateClosedTimestamp(t *testing.T) { } lease = li.Current() if !lease.OwnedBy(n2.GetFirstStoreID()) { - return errors.Errorf("n2 still unaware of its lease: %s", &lease) + return errors.Errorf("n2 still unaware of its lease: %s", li.Current()) } return nil }) diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go index 648b81cc6a5b..3d9834956bb6 100644 --- a/pkg/kv/kvserver/replica_command.go +++ b/pkg/kv/kvserver/replica_command.go @@ -2841,7 +2841,7 @@ func (s *Store) relocateOne( `range %s was either in a joint configuration or had learner replicas: %v`, desc, desc.Replicas()) } - confReader, err := s.GetConfReader() + confReader, err := s.GetConfReader(ctx) if err != nil { return nil, nil, errors.Wrap(err, "can't relocate range") } diff --git a/pkg/kv/kvserver/replica_init.go b/pkg/kv/kvserver/replica_init.go index fbb1b1dc0c69..5771e6f3e651 100644 --- a/pkg/kv/kvserver/replica_init.go +++ b/pkg/kv/kvserver/replica_init.go @@ -127,6 +127,8 @@ func newUnloadedReplica( r.splitQueueThrottle = util.Every(splitQueueThrottleDuration) r.mergeQueueThrottle = util.Every(mergeQueueThrottleDuration) + + r.breaker = newReplicaCircuitBreaker(store.cfg.Settings, store.stopper, r.AmbientContext, r) return r } diff --git a/pkg/kv/kvserver/replica_proposal.go b/pkg/kv/kvserver/replica_proposal.go index bfafefb9ebdf..8bdd0bce36f4 100644 --- a/pkg/kv/kvserver/replica_proposal.go +++ b/pkg/kv/kvserver/replica_proposal.go @@ -68,6 +68,10 @@ type ProposalData struct { // last (re-)proposed. proposedAtTicks int + // createdAtTicks is the (logical) time at which this command was + // *first* proposed. + createdAtTicks int + // command is serialized and proposed to raft. In the event of // reproposals its MaxLeaseIndex field is mutated. 
command *kvserverpb.RaftCommand diff --git a/pkg/kv/kvserver/replica_proposal_buf.go b/pkg/kv/kvserver/replica_proposal_buf.go index e4435633ee43..28209fb97332 100644 --- a/pkg/kv/kvserver/replica_proposal_buf.go +++ b/pkg/kv/kvserver/replica_proposal_buf.go @@ -1021,6 +1021,9 @@ func (rp *replicaProposer) registerProposalLocked(p *ProposalData) { // Record when the proposal was submitted to Raft so that we can later // decide if/when to re-propose it. p.proposedAtTicks = rp.mu.ticks + if p.createdAtTicks == 0 { + p.createdAtTicks = rp.mu.ticks + } rp.mu.proposals[p.idKey] = p } diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go index 7f6166730630..a125963b71c4 100644 --- a/pkg/kv/kvserver/replica_raft.go +++ b/pkg/kv/kvserver/replica_raft.go @@ -193,7 +193,7 @@ func (r *Replica) evalAndPropose( } else if !st.Lease.OwnedBy(r.store.StoreID()) { // Perform a sanity check that the lease is owned by this replica. This must // have been ascertained by the callers in checkExecutionCanProceed. - log.Fatalf(ctx, "cannot propose %s on follower with remotely owned lease %s", ba, &st.Lease) + log.Fatalf(ctx, "cannot propose %s on follower with remotely owned lease %s", ba, st.Lease) } else { proposal.command.ProposerLeaseSequence = st.Lease.Sequence } @@ -752,7 +752,7 @@ func (r *Replica) handleRaftReadyRaftMuLocked( msgApps, otherMsgs := splitMsgApps(rd.Messages) r.traceMessageSends(msgApps, "sending msgApp") - r.sendRaftMessages(ctx, msgApps) + r.sendRaftMessagesRaftMuLocked(ctx, msgApps) // Use a more efficient write-only batch because we don't need to do any // reads from the batch. Any reads are performed on the underlying DB. @@ -862,7 +862,7 @@ func (r *Replica) handleRaftReadyRaftMuLocked( // Update raft log entry cache. We clear any older, uncommitted log entries // and cache the latest ones. r.store.raftEntryCache.Add(r.RangeID, rd.Entries, true /* truncate */) - r.sendRaftMessages(ctx, otherMsgs) + r.sendRaftMessagesRaftMuLocked(ctx, otherMsgs) r.traceEntries(rd.CommittedEntries, "committed, before applying any entries") applicationStart := timeutil.Now() @@ -1010,7 +1010,7 @@ func (r *Replica) tick(ctx context.Context, livenessMap liveness.IsLiveMap) (boo } now := r.store.Clock().NowAsClockTimestamp() - if r.maybeQuiesceLocked(ctx, now, livenessMap) { + if r.maybeQuiesceRaftMuLockedReplicaMuLocked(ctx, now, livenessMap) { return false, nil } @@ -1057,6 +1057,21 @@ func (r *Replica) hasRaftReadyRLocked() bool { return r.mu.internalRaftGroup.HasReady() } +// slowReplicationThreshold returns the threshold after which in-flight +// replicated commands should be considered "stuck" and should trip the +// per-Replica circuit breaker. The boolean indicates whether this +// mechanism is enabled; if it isn't no action should be taken. +func (r *Replica) slowReplicationThreshold(ba *roachpb.BatchRequest) (time.Duration, bool) { + if knobs := r.store.TestingKnobs(); knobs != nil && knobs.SlowReplicationThresholdOverride != nil { + if dur := knobs.SlowReplicationThresholdOverride(ba); dur > 0 { + return dur, true + } + // Fall through. 
+ } + dur := replicaCircuitBreakerSlowReplicationThreshold.Get(&r.store.cfg.Settings.SV) + return dur, dur > 0 +} + //go:generate stringer -type refreshRaftReason type refreshRaftReason int @@ -1092,9 +1107,29 @@ func (r *Replica) refreshProposalsLocked( log.Fatalf(ctx, "refreshAtDelta specified for reason %s != reasonTicks", reason) } + var maxSlowProposalDurationRequest *roachpb.BatchRequest + var maxSlowProposalDuration time.Duration + var slowProposalCount int64 var reproposals pendingCmdSlice for _, p := range r.mu.proposals { - if p.command.MaxLeaseIndex == 0 { + slowReplicationThreshold, ok := r.slowReplicationThreshold(p.Request) + // NB: ticks can be delayed, in which this detection would kick in too late + // as well. This is unlikely to become a concern since the configured + // durations here should be very large compared to the refresh interval, and + // so delays shouldn't dramatically change the detection latency. + inflightDuration := r.store.cfg.RaftTickInterval * time.Duration(r.mu.ticks-p.createdAtTicks) + if ok && inflightDuration > slowReplicationThreshold { + if maxSlowProposalDuration < inflightDuration { + maxSlowProposalDuration = inflightDuration + maxSlowProposalDurationRequest = p.Request + slowProposalCount++ + } + } + // TODO(tbg): the enabled() call is a hack until we've figured out what to + // do about #74711. If leases are finished instead of reproposed, they can't + // ever trigger the breaker, which is bad as there usually isn't anything + // else around that will. + if p.command.MaxLeaseIndex == 0 && !r.breaker.enabled() { // Commands without a MaxLeaseIndex cannot be reproposed, as they might // apply twice. We also don't want to ask the proposer to retry these // special commands. @@ -1141,6 +1176,30 @@ func (r *Replica) refreshProposalsLocked( } } + r.store.metrics.SlowRaftRequests.Update(slowProposalCount) + + // If the breaker isn't tripped yet but we've detected commands that have + // taken too long to replicate, trip the breaker now. + // + // NB: we still keep reproposing commands on this and subsequent ticks + // even though this seems strictly counter-productive, except perhaps + // for the probe's proposals. We could consider being more strict here + // which could avoid build-up of raft log entries during outages, see + // for example: + // https://github.com/cockroachdb/cockroach/issues/60612 + if r.breaker.Signal().Err() == nil && maxSlowProposalDuration > 0 { + log.Warningf(ctx, + "have been waiting %.2fs for slow proposal %s", + maxSlowProposalDuration.Seconds(), maxSlowProposalDurationRequest, + ) + // NB: this is async because we're holding lots of locks here, and we want + // to avoid having to pass all the information about the replica into the + // breaker (since the breaker needs access to this information at will to + // power the probe anyway). Over time, we anticipate there being multiple + // mechanisms which trip the breaker. 
+ r.breaker.TripAsync() + } + if log.V(1) && len(reproposals) > 0 { log.Infof(ctx, "pending commands: reproposing %d (at %d.%d) %s", @@ -1207,7 +1266,7 @@ func (r *Replica) maybeCoalesceHeartbeat( return true } -func (r *Replica) sendRaftMessages(ctx context.Context, messages []raftpb.Message) { +func (r *Replica) sendRaftMessagesRaftMuLocked(ctx context.Context, messages []raftpb.Message) { var lastAppResp raftpb.Message for _, message := range messages { drop := false @@ -1275,19 +1334,19 @@ func (r *Replica) sendRaftMessages(ctx context.Context, messages []raftpb.Messag } if !drop { - r.sendRaftMessage(ctx, message) + r.sendRaftMessageRaftMuLocked(ctx, message) } } if lastAppResp.Index > 0 { - r.sendRaftMessage(ctx, lastAppResp) + r.sendRaftMessageRaftMuLocked(ctx, lastAppResp) } } -// sendRaftMessage sends a Raft message. -func (r *Replica) sendRaftMessage(ctx context.Context, msg raftpb.Message) { +// sendRaftMessageRaftMuLocked sends a Raft message. +func (r *Replica) sendRaftMessageRaftMuLocked(ctx context.Context, msg raftpb.Message) { r.mu.RLock() - fromReplica, fromErr := r.getReplicaDescriptorByIDRLocked(roachpb.ReplicaID(msg.From), r.mu.lastToReplica) - toReplica, toErr := r.getReplicaDescriptorByIDRLocked(roachpb.ReplicaID(msg.To), r.mu.lastFromReplica) + fromReplica, fromErr := r.getReplicaDescriptorByIDRLocked(roachpb.ReplicaID(msg.From), r.raftMu.lastToReplica) + toReplica, toErr := r.getReplicaDescriptorByIDRLocked(roachpb.ReplicaID(msg.To), r.raftMu.lastFromReplica) var startKey roachpb.RKey if msg.Type == raftpb.MsgApp && r.mu.internalRaftGroup != nil { // When the follower is potentially an uninitialized replica waiting for diff --git a/pkg/kv/kvserver/replica_raft_quiesce.go b/pkg/kv/kvserver/replica_raft_quiesce.go index 63ae153c2a2a..258665ef14df 100644 --- a/pkg/kv/kvserver/replica_raft_quiesce.go +++ b/pkg/kv/kvserver/replica_raft_quiesce.go @@ -121,9 +121,9 @@ func (r *Replica) canUnquiesceRLocked() bool { r.mu.internalRaftGroup != nil } -// maybeQuiesceLocked checks to see if the replica is quiescable and initiates -// quiescence if it is. Returns true if the replica has been quiesced and false -// otherwise. +// maybeQuiesceRaftMuLockedReplicaMuLocked checks to see if the replica is +// quiescable and initiates quiescence if it is. Returns true if the replica has +// been quiesced and false otherwise. // // A quiesced range is not ticked and thus doesn't create MsgHeartbeat requests // or cause elections. The Raft leader for a range checks various @@ -178,14 +178,14 @@ func (r *Replica) canUnquiesceRLocked() bool { // would quiesce. The fallout from this situation are undesirable raft // elections which will cause throughput hiccups to the range, but not // correctness issues. 
-func (r *Replica) maybeQuiesceLocked( +func (r *Replica) maybeQuiesceRaftMuLockedReplicaMuLocked( ctx context.Context, now hlc.ClockTimestamp, livenessMap liveness.IsLiveMap, ) bool { status, lagging, ok := shouldReplicaQuiesce(ctx, r, now, livenessMap) if !ok { return false } - return r.quiesceAndNotifyLocked(ctx, status, lagging) + return r.quiesceAndNotifyRaftMuLockedReplicaMuLocked(ctx, status, lagging) } type quiescer interface { @@ -398,10 +398,10 @@ func shouldReplicaQuiesce( return status, lagging, true } -func (r *Replica) quiesceAndNotifyLocked( +func (r *Replica) quiesceAndNotifyRaftMuLockedReplicaMuLocked( ctx context.Context, status *raft.Status, lagging laggingReplicaSet, ) bool { - fromReplica, fromErr := r.getReplicaDescriptorByIDRLocked(r.mu.replicaID, r.mu.lastToReplica) + fromReplica, fromErr := r.getReplicaDescriptorByIDRLocked(r.mu.replicaID, r.raftMu.lastToReplica) if fromErr != nil { if log.V(4) { log.Infof(ctx, "not quiescing: cannot find from replica (%d)", r.mu.replicaID) @@ -416,7 +416,7 @@ func (r *Replica) quiesceAndNotifyLocked( continue } toReplica, toErr := r.getReplicaDescriptorByIDRLocked( - roachpb.ReplicaID(id), r.mu.lastFromReplica) + roachpb.ReplicaID(id), r.raftMu.lastFromReplica) if toErr != nil { if log.V(4) { log.Infof(ctx, "failed to quiesce: cannot find to replica (%d)", id) diff --git a/pkg/kv/kvserver/replica_raftstorage.go b/pkg/kv/kvserver/replica_raftstorage.go index 86c5872ae148..ea5711c7bd14 100644 --- a/pkg/kv/kvserver/replica_raftstorage.go +++ b/pkg/kv/kvserver/replica_raftstorage.go @@ -684,7 +684,7 @@ func (r *Replica) updateRangeInfo(ctx context.Context, desc *roachpb.RangeDescri // the original range wont work as the original and new ranges might belong // to different zones. // Load the system config. - confReader, err := r.store.GetConfReader() + confReader, err := r.store.GetConfReader(ctx) if errors.Is(err, errSysCfgUnavailable) { // This could be before the system config was ever gossiped, or it // expired. Let the gossip callback set the info. diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go index 75a69685135e..7eadd4a8084c 100644 --- a/pkg/kv/kvserver/replica_range_lease.go +++ b/pkg/kv/kvserver/replica_range_lease.go @@ -436,6 +436,11 @@ func (p *pendingLeaseRequest) requestLeaseAsync( // Send the RequestLeaseRequest or TransferLeaseRequest and wait for the new // lease to be applied. if pErr == nil { + // The Replica circuit breakers together with round-tripping a ProbeRequest + // here before asking for the lease could provide an alternative, simpler + // solution to the the below issue: + // + // https://github.com/cockroachdb/cockroach/issues/37906 ba := roachpb.BatchRequest{} ba.Timestamp = p.repl.store.Clock().Now() ba.RangeID = p.repl.RangeID @@ -1049,7 +1054,7 @@ func (r *Replica) leaseGoodToGoForStatusRLocked( // this is just a logged error instead of a fatal // assertion. log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists", - &st.Lease, st.Lease.Replica) + st.Lease, st.Lease.Replica) } // Otherwise, if the lease is currently held by another replica, redirect // to the holder. @@ -1159,7 +1164,7 @@ func (r *Replica) redirectOnOrAcquireLeaseForRequest( if !stillMember { // See corresponding comment in leaseGoodToGoRLocked. 
log.Errorf(ctx, "lease %s owned by replica %+v that no longer exists", - &status.Lease, status.Lease.Replica) + status.Lease, status.Lease.Replica) } // Otherwise, if the lease is currently held by another replica, redirect // to the holder. @@ -1323,7 +1328,7 @@ func (r *Replica) maybeExtendLeaseAsync(ctx context.Context, st kvserverpb.Lease return } if log.ExpensiveLogEnabled(ctx, 2) { - log.Infof(ctx, "extending lease %s at %s", &st.Lease, st.Now) + log.Infof(ctx, "extending lease %s at %s", st.Lease, st.Now) } // We explicitly ignore the returned handle as we won't block on it. _ = r.requestLeaseLocked(ctx, st) diff --git a/pkg/kv/kvserver/replica_rangefeed.go b/pkg/kv/kvserver/replica_rangefeed.go index 61f3b1cb5310..2205473e6b10 100644 --- a/pkg/kv/kvserver/replica_rangefeed.go +++ b/pkg/kv/kvserver/replica_rangefeed.go @@ -716,7 +716,7 @@ func (r *Replica) ensureClosedTimestampStarted(ctx context.Context) *roachpb.Err // In particular, r.redirectOnOrAcquireLease() doesn't work because, if the // current lease is invalid and the current replica is not a leader, the // current replica will not take a lease. - log.VEventf(ctx, 2, "ensuring lease for rangefeed range. current lease invalid: %s", &lease.Lease) + log.VEventf(ctx, 2, "ensuring lease for rangefeed range. current lease invalid: %s", lease.Lease) err := contextutil.RunWithTimeout(ctx, "read forcing lease acquisition", 5*time.Second, func(ctx context.Context) error { var b kv.Batch diff --git a/pkg/kv/kvserver/replica_send.go b/pkg/kv/kvserver/replica_send.go index 86956fb884ea..ad67decb0862 100644 --- a/pkg/kv/kvserver/replica_send.go +++ b/pkg/kv/kvserver/replica_send.go @@ -96,6 +96,73 @@ func (r *Replica) Send( return r.sendWithRangeID(ctx, r.RangeID, &ba) } +// checkCircuitBreaker takes a cancelable context and its cancel function. The +// context is cancelled when the circuit breaker trips. If the breaker is +// already tripped , its error is returned immediately and the caller should not +// continue processing the request. Otherwise, the caller is provided with a +// signaller for use in a deferred call to maybeAdjustWithBreakerError, which +// will annotate the outgoing error in the event of the breaker tripping while +// the request is processing. +func (r *Replica) checkCircuitBreaker(ctx context.Context, cancel func()) (signaller, error) { + // NB: brSig will never trip if circuit breakers are not enabled. + brSig := r.breaker.Signal() + if isCircuitBreakerProbe(ctx) { + brSig = neverTripSignaller{} + } + + if err := brSig.Err(); err != nil { + // TODO(tbg): we may want to exclude some requests from this check, or allow + // requests to exclude themselves from the check (via their header). + cancel() + return nil, err + } + + // NB: this is a total crutch, see: + // https://github.com/cockroachdb/cockroach/issues/74707 + // It will do until breakers default to on: + // https://github.com/cockroachdb/cockroach/issues/74705 + if ch := brSig.C(); ch != nil { + _ = r.store.Stopper().RunAsyncTask(ctx, "watch", func(ctx context.Context) { + select { + case <-ctx.Done(): + return + case <-ch: + cancel() + } + }) + } + + return brSig, nil +} + +func maybeAdjustWithBreakerError(pErr *roachpb.Error, brErr error) *roachpb.Error { + if pErr == nil || brErr == nil { + return pErr + } + err := pErr.GoError() + if ae := (&roachpb.AmbiguousResultError{}); errors.As(err, &ae) { + // The breaker tripped while a command was inflight, so we have to + // propagate an ambiguous result. 
We don't want to replace it, but there + // is a way to stash an Error in it so we use that. + // + // TODO(tbg): could also wrap it; there is no other write to WrappedErr + // in the codebase and it might be better to remove it. Nested *Errors + // are not a good idea. + wrappedErr := brErr + if ae.WrappedErr != nil { + wrappedErr = errors.Wrapf(brErr, "%v", ae.WrappedErr) + } + ae.WrappedErr = roachpb.NewError(wrappedErr) + return roachpb.NewError(ae) + } else if le := (&roachpb.NotLeaseHolderError{}); errors.As(err, &le) { + // When a lease acquisition triggered by this request is short-circuited + // by the breaker, it will return an opaque NotLeaseholderError, which we + // replace with the breaker's error. + return roachpb.NewError(errors.CombineErrors(brErr, le)) + } + return pErr +} + // sendWithRangeID takes an unused rangeID argument so that the range // ID will be accessible in stack traces (both in panics and when // sampling goroutines from a live server). This line is subject to @@ -109,7 +176,7 @@ func (r *Replica) Send( // github.com/cockroachdb/cockroach/pkg/storage.(*Replica).sendWithRangeID(0xc420d1a000, 0x64bfb80, 0xc421564b10, 0x15, 0x153fd4634aeb0193, 0x0, 0x100000001, 0x1, 0x15, 0x0, ...) func (r *Replica) sendWithRangeID( ctx context.Context, _forStacks roachpb.RangeID, ba *roachpb.BatchRequest, -) (*roachpb.BatchResponse, *roachpb.Error) { +) (_ *roachpb.BatchResponse, rErr *roachpb.Error) { var br *roachpb.BatchResponse if r.leaseholderStats != nil && ba.Header.GatewayNodeID != 0 { r.leaseholderStats.record(ba.Header.GatewayNodeID) @@ -126,6 +193,17 @@ func (r *Replica) sendWithRangeID( return nil, roachpb.NewError(err) } + // Circuit breaker handling. + ctx, cancel := context.WithCancel(ctx) + brSig, err := r.checkCircuitBreaker(ctx, cancel) + if err != nil { + return nil, roachpb.NewError(err) + } + defer func() { + rErr = maybeAdjustWithBreakerError(rErr, brSig.Err()) + cancel() + }() + if err := r.maybeBackpressureBatch(ctx, ba); err != nil { return nil, roachpb.NewError(err) } @@ -137,7 +215,7 @@ func (r *Replica) sendWithRangeID( } // NB: must be performed before collecting request spans. - ba, err := maybeStripInFlightWrites(ba) + ba, err = maybeStripInFlightWrites(ba) if err != nil { return nil, roachpb.NewError(err) } diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index be811a262617..2b6a7d52143e 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -66,7 +66,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/cockroachdb/logtags" - "github.com/cockroachdb/redact" "github.com/kr/pretty" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -10213,7 +10212,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { ba.Add(&get, &put) return }, - expErr: "write at timestamp .* too old", + expErr: "write for key .* at timestamp .* too old", }, { name: "serializable push without retry", @@ -10242,7 +10241,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { assignSeqNumsForReqs(ba.Txn, &cput) return }, - expErr: "write at timestamp .* too old", + expErr: "write for key .* at timestamp .* too old", }, // Non-1PC serializable txn initput will fail with write too old error. 
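Stepping back to the circuit-breaker wiring in Replica.Send above: checkCircuitBreaker hands the caller a cancelable context that a watcher goroutine cancels if the breaker trips mid-flight, and the deferred maybeAdjustWithBreakerError then folds the breaker's error into whatever the request returns. A minimal sketch of the cancellation half of that pattern, with a plain channel standing in for the breaker's signal (illustrative names only, not the kvserver API):

```
package main

import (
	"context"
	"fmt"
	"time"
)

// watchSignal returns a context that is cancelled either when the parent is
// done or when sig fires, mirroring the watcher goroutine launched by
// checkCircuitBreaker. The sig channel stands in for the breaker's signal.
func watchSignal(parent context.Context, sig <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		select {
		case <-ctx.Done():
			// The request finished first; nothing to do.
		case <-sig:
			cancel() // breaker tripped while the request was in flight
		}
	}()
	return ctx, cancel
}

func main() {
	trip := make(chan struct{})
	ctx, cancel := watchSignal(context.Background(), trip)
	defer cancel()

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(trip) // simulate the breaker tripping mid-request
	}()

	<-ctx.Done()
	fmt.Println("request context cancelled:", ctx.Err())
}
```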
{ @@ -10257,7 +10256,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { assignSeqNumsForReqs(ba.Txn, &iput) return }, - expErr: "write at timestamp .* too old", + expErr: "write for key .* at timestamp .* too old", }, // Non-1PC serializable txn locking scan will fail with write too old error. { @@ -10272,7 +10271,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { ba.Add(scan) return }, - expErr: "write at timestamp .* too old", + expErr: "write for key .* at timestamp .* too old", }, // Non-1PC serializable txn cput with CanForwardReadTimestamp set to // true will succeed with write too old error. @@ -12990,44 +12989,6 @@ func enableTraceDebugUseAfterFree() (restore func()) { return func() { trace.DebugUseAfterFinish = prev } } -func TestRangeUnavailableMessage(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - var repls roachpb.ReplicaSet - repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 10, ReplicaID: 100}) - repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 2, StoreID: 20, ReplicaID: 200}) - desc := roachpb.NewRangeDescriptor(10, roachpb.RKey("a"), roachpb.RKey("z"), repls) - dur := time.Minute - var ba roachpb.BatchRequest - ba.Add(&roachpb.RequestLeaseRequest{}) - lm := liveness.IsLiveMap{ - 1: liveness.IsLiveMapEntry{IsLive: true}, - } - rs := raft.Status{} - var s redact.StringBuilder - rangeUnavailableMessage(&s, desc, lm, &rs, &ba, dur) - const exp = `have been waiting 60.00s for proposing command RequestLease [‹/Min›,‹/Min›). -This range is likely unavailable. -Please submit this message to Cockroach Labs support along with the following information: - -Descriptor: r10:‹{a-z}› [(n1,s10):1, (n2,s20):2, next=3, gen=0] -Live: (n1,s10):1 -Non-live: (n2,s20):2 -Raft Status: {"id":"0","term":0,"vote":"0","commit":0,"lead":"0","raftState":"StateFollower","applied":0,"progress":{},"leadtransferee":"0"} - -and a copy of https://yourhost:8080/#/reports/range/10 - -If you are using CockroachDB Enterprise, reach out through your -support contract. Otherwise, please open an issue at: - - https://github.com/cockroachdb/cockroach/issues/new/choose -` - act := s.RedactableString() - t.Log(act) - require.EqualValues(t, exp, act) -} - // Test that, depending on the request's ClientRangeInfo, descriptor and lease // updates are returned. func TestRangeInfoReturned(t *testing.T) { diff --git a/pkg/kv/kvserver/replica_write.go b/pkg/kv/kvserver/replica_write.go index 723a3c7eca68..c2965c8cce5b 100644 --- a/pkg/kv/kvserver/replica_write.go +++ b/pkg/kv/kvserver/replica_write.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/uncertainty" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -33,8 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" - "github.com/cockroachdb/redact" - "go.etcd.io/etcd/raft/v3" ) // migrateApplicationTimeout is the duration to wait for a Migrate command @@ -173,26 +170,6 @@ func (r *Replica) executeWriteBatch( // If the command was accepted by raft, wait for the range to apply it. 
ctxDone := ctx.Done() shouldQuiesce := r.store.stopper.ShouldQuiesce() - startPropTime := timeutil.Now() - slowTimer := timeutil.NewTimer() - defer slowTimer.Stop() - slowTimer.Reset(r.store.cfg.SlowReplicationThreshold) - // NOTE: this defer was moved from a case in the select statement to here - // because escape analysis does a better job avoiding allocations to the - // heap when defers are unconditional. When this was in the slowTimer select - // case, it was causing pErr to escape. - defer func() { - if slowTimer.Read { - r.store.metrics.SlowRaftRequests.Dec(1) - log.Infof( - ctx, - "slow command %s finished after %.2fs with error %v", - ba, - timeutil.Since(startPropTime).Seconds(), - pErr, - ) - } - }() for { select { @@ -286,15 +263,6 @@ func (r *Replica) executeWriteBatch( return propResult.Reply, nil, propResult.Err - case <-slowTimer.C: - slowTimer.Read = true - r.store.metrics.SlowRaftRequests.Inc(1) - - var s redact.StringBuilder - rangeUnavailableMessage(&s, r.Desc(), r.store.cfg.NodeLiveness.GetIsLiveMap(), - r.RaftStatus(), ba, timeutil.Since(startPropTime)) - log.Errorf(ctx, "range unavailable: %v", s) - case <-ctxDone: // If our context was canceled, return an AmbiguousResultError, // which indicates to the caller that the command may have executed. @@ -345,54 +313,6 @@ func (r *Replica) executeWriteBatch( } } -func rangeUnavailableMessage( - s *redact.StringBuilder, - desc *roachpb.RangeDescriptor, - lm liveness.IsLiveMap, - rs *raft.Status, - ba *roachpb.BatchRequest, - dur time.Duration, -) { - var liveReplicas, otherReplicas []roachpb.ReplicaDescriptor - for _, rDesc := range desc.Replicas().Descriptors() { - if lm[rDesc.NodeID].IsLive { - liveReplicas = append(liveReplicas, rDesc) - } else { - otherReplicas = append(otherReplicas, rDesc) - } - } - - // Ensure that these are going to redact nicely. - var _ redact.SafeFormatter = ba - var _ redact.SafeFormatter = desc - var _ redact.SafeFormatter = roachpb.ReplicaSet{} - - s.Printf(`have been waiting %.2fs for proposing command %s. -This range is likely unavailable. -Please submit this message to Cockroach Labs support along with the following information: - -Descriptor: %s -Live: %s -Non-live: %s -Raft Status: %+v - -and a copy of https://yourhost:8080/#/reports/range/%d - -If you are using CockroachDB Enterprise, reach out through your -support contract. Otherwise, please open an issue at: - - https://github.com/cockroachdb/cockroach/issues/new/choose -`, - dur.Seconds(), - ba, - desc, - roachpb.MakeReplicaSet(liveReplicas), - roachpb.MakeReplicaSet(otherReplicas), - redact.Safe(rs), // raft status contains no PII - desc.RangeID, - ) -} - // canAttempt1PCEvaluation looks at the batch and decides whether it can be // executed as 1PC. func (r *Replica) canAttempt1PCEvaluation( diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index b80a122d611b..f2e91f9689fd 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -516,7 +516,7 @@ Store.HandleRaftRequest (which is part of the RaftMessageHandler interface), ultimately resulting in a call to Replica.handleRaftReadyRaftMuLocked, which houses the integration with the etcd/raft library (raft.RawNode). This may generate Raft messages to be sent to other Stores; these are handed to -Replica.sendRaftMessages which ultimately hands them to the Store's +Replica.sendRaftMessagesRaftMuLocked which ultimately hands them to the Store's RaftTransport.SendAsync method. 
Raft uses message passing (not request-response), and outgoing messages will use a gRPC stream that differs from that used for incoming messages (which makes asymmetric partitions more @@ -1059,10 +1059,6 @@ type StoreConfig struct { // KVAdmissionController is an optional field used for admission control. KVAdmissionController KVAdmissionController - - // SlowReplicationThreshold is the duration after which an in-flight proposal - // is tracked in the requests.slow.raft metric. - SlowReplicationThreshold time.Duration } // ConsistencyTestingKnobs is a BatchEvalTestingKnobs struct used to control the @@ -1109,9 +1105,6 @@ func (sc *StoreConfig) SetDefaults() { if sc.TestingKnobs.GossipWhenCapacityDeltaExceedsFraction == 0 { sc.TestingKnobs.GossipWhenCapacityDeltaExceedsFraction = defaultGossipWhenCapacityDeltaExceedsFraction } - if sc.SlowReplicationThreshold == 0 { - sc.SlowReplicationThreshold = base.SlowRequestThreshold - } } // GetStoreConfig exposes the config used for this store. @@ -1508,10 +1501,10 @@ func (s *Store) SetDraining(drain bool, reporter func(int, redact.SafeString)) { if transferStatus != transferOK { if err != nil { log.VErrEventf(ctx, 1, "failed to transfer lease %s for range %s when draining: %s", - &drainingLeaseStatus.Lease, desc, err) + drainingLeaseStatus.Lease, desc, err) } else { log.VErrEventf(ctx, 1, "failed to transfer lease %s for range %s when draining: %s", - &drainingLeaseStatus.Lease, desc, transferStatus) + drainingLeaseStatus.Lease, desc, transferStatus) } } } @@ -2104,7 +2097,7 @@ func (s *Store) startGossip() { var errSysCfgUnavailable = errors.New("system config not available in gossip") // GetConfReader exposes access to a configuration reader. -func (s *Store) GetConfReader() (spanconfig.StoreReader, error) { +func (s *Store) GetConfReader(ctx context.Context) (spanconfig.StoreReader, error) { if s.cfg.TestingKnobs.MakeSystemConfigSpanUnavailableToQueues { return nil, errSysCfgUnavailable } @@ -3260,7 +3253,7 @@ func (s *Store) ManuallyEnqueue( return nil, nil, errors.Errorf("unknown queue type %q", queueName) } - confReader, err := s.GetConfReader() + confReader, err := s.GetConfReader(ctx) if err != nil { return nil, nil, errors.Wrap(err, "unable to retrieve conf reader, cannot run queue; make sure "+ diff --git a/pkg/kv/kvserver/store_raft.go b/pkg/kv/kvserver/store_raft.go index 89a70537990a..e7cc24dba5f1 100644 --- a/pkg/kv/kvserver/store_raft.go +++ b/pkg/kv/kvserver/store_raft.go @@ -213,7 +213,7 @@ func (s *Store) withReplicaForRequest( return roachpb.NewError(err) } defer r.raftMu.Unlock() - r.setLastReplicaDescriptors(req) + r.setLastReplicaDescriptorsRaftMuLocked(req) return f(ctx, r) } diff --git a/pkg/kv/kvserver/store_rebalancer.go b/pkg/kv/kvserver/store_rebalancer.go index c7367f088dcc..d5c4ec9e97f0 100644 --- a/pkg/kv/kvserver/store_rebalancer.go +++ b/pkg/kv/kvserver/store_rebalancer.go @@ -582,15 +582,27 @@ func (sr *StoreRebalancer) chooseRangeToRebalance( log.VEventf(ctx, 3, "considering replica rebalance for r%d with %.2f qps", replWithStats.repl.GetRangeID(), replWithStats.qps) - targetVoterRepls, targetNonVoterRepls := sr.getRebalanceTargetsBasedOnQPS( + targetVoterRepls, targetNonVoterRepls, foundRebalance := sr.getRebalanceTargetsBasedOnQPS( ctx, rebalanceCtx, options, ) + + if !foundRebalance { + // Bail if there are no stores that are better for the existing replicas. + // If the range needs a lease transfer to enable better load distribution, + // it will be handled by the logic in `chooseLeaseToTransfer()`. 
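The foundRebalance return value introduced in this change lets chooseRangeToRebalance skip a range outright when none of its replicas has a strictly better store available. A toy sketch of that bookkeeping, with made-up load numbers and none of the allocator's real scoring:

```
package main

import "fmt"

// betterStore stands in for the allocator call: it returns a less loaded
// store for the replica, or ok=false when the current store is already the
// best available option.
func betterStore(current int, load map[int]float64) (target int, ok bool) {
	target, best := current, load[current]
	for s, l := range load {
		if l < best {
			target, best = s, l
		}
	}
	return target, target != current
}

// planRebalance mirrors the foundRebalance bookkeeping: rebalancing is only
// reported if at least one replica found a strictly better store; otherwise
// the caller skips the range entirely.
func planRebalance(replicaStores []int, load map[int]float64) (targets []int, found bool) {
	targets = append([]int(nil), replicaStores...)
	for i, s := range replicaStores {
		if t, ok := betterStore(s, load); ok {
			targets[i] = t
			found = true
		}
	}
	return targets, found
}

func main() {
	load := map[int]float64{1: 100, 2: 90, 3: 10}
	fmt.Println(planRebalance([]int{3}, load)) // [3] false: already on the best store, skip
	fmt.Println(planRebalance([]int{1}, load)) // [3] true: a better target exists
}
```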
+ log.VEventf(ctx, 3, "could not find rebalance opportunities for r%d", replWithStats.repl.RangeID) + continue + } + storeDescMap := storeListToMap(allStoresList) // Pick the voter with the least QPS to be leaseholder; // RelocateRange transfers the lease to the first provided target. + // + // TODO(aayush): Does this logic need to exist? This logic does not take + // lease preferences into account. So it is already broken in a way. newLeaseIdx := 0 newLeaseQPS := math.MaxFloat64 var raftStatus *raft.Status @@ -625,7 +637,7 @@ func (sr *StoreRebalancer) chooseRangeToRebalance( // the stores in this cluster. func (sr *StoreRebalancer) getRebalanceTargetsBasedOnQPS( ctx context.Context, rbCtx rangeRebalanceContext, options scorerOptions, -) (finalVoterTargets, finalNonVoterTargets []roachpb.ReplicaDescriptor) { +) (finalVoterTargets, finalNonVoterTargets []roachpb.ReplicaDescriptor, foundRebalance bool) { finalVoterTargets = rbCtx.rangeDesc.Replicas().VoterDescriptors() finalNonVoterTargets = rbCtx.rangeDesc.Replicas().NonVoterDescriptors() @@ -652,6 +664,9 @@ func (sr *StoreRebalancer) getRebalanceTargetsBasedOnQPS( rbCtx.rangeDesc.RangeID, ) break + } else { + // Record the fact that we found at least one rebalance opportunity. + foundRebalance = true } log.VEventf( ctx, @@ -712,6 +727,9 @@ func (sr *StoreRebalancer) getRebalanceTargetsBasedOnQPS( rbCtx.rangeDesc.RangeID, ) break + } else { + // Record the fact that we found at least one rebalance opportunity. + foundRebalance = true } log.VEventf( ctx, @@ -737,7 +755,7 @@ func (sr *StoreRebalancer) getRebalanceTargetsBasedOnQPS( // Pretend that we've executed upon this rebalancing decision. finalNonVoterTargets = newNonVoters } - return finalVoterTargets, finalNonVoterTargets + return finalVoterTargets, finalNonVoterTargets, foundRebalance } func storeListToMap(sl StoreList) map[roachpb.StoreID]*roachpb.StoreDescriptor { diff --git a/pkg/kv/kvserver/store_rebalancer_test.go b/pkg/kv/kvserver/store_rebalancer_test.go index ec3135d021fc..a5334d1913d2 100644 --- a/pkg/kv/kvserver/store_rebalancer_test.go +++ b/pkg/kv/kvserver/store_rebalancer_test.go @@ -25,8 +25,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" + "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/stretchr/testify/require" - raft "go.etcd.io/etcd/raft/v3" + "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/tracker" ) @@ -242,9 +243,10 @@ type testRange struct { func loadRanges(rr *replicaRankings, s *Store, ranges []testRange) { acc := rr.newAccumulator() - for _, r := range ranges { - repl := &Replica{store: s} - repl.mu.state.Desc = &roachpb.RangeDescriptor{} + for i, r := range ranges { + rangeID := roachpb.RangeID(i + 1) + repl := &Replica{store: s, RangeID: rangeID} + repl.mu.state.Desc = &roachpb.RangeDescriptor{RangeID: rangeID} repl.mu.conf = s.cfg.DefaultSpanConfig for _, storeID := range r.voters { repl.mu.state.Desc.InternalReplicas = append(repl.mu.state.Desc.InternalReplicas, roachpb.ReplicaDescriptor{ @@ -706,7 +708,7 @@ func TestChooseRangeToRebalanceAcrossHeterogeneousZones(t *testing.T) { name: "no rebalance", voters: []roachpb.StoreID{3, 6, 9}, constraints: oneReplicaPerRegion, - expRebalancedVoters: []roachpb.StoreID{9, 6, 3}, + expRebalancedVoters: []roachpb.StoreID{}, }, // A replica is in a heavily loaded region, on a relatively heavily loaded // store. 
We expect it to be moved to a less busy store within the same @@ -878,6 +880,56 @@ func TestChooseRangeToRebalanceAcrossHeterogeneousZones(t *testing.T) { } } +// TestChooseRangeToRebalanceIgnoresRangeOnBestStores tests that the store +// rebalancer does not attempt to rebalance ranges unless it finds a better set +// of target stores for it compared to its existing stores. +func TestChooseRangeToRebalanceIgnoresRangeOnBestStores(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx, finishAndGetRecording := tracing.ContextWithRecordingSpan( + context.Background(), tracing.NewTracer(), "test", + ) + stopper := stop.NewStopper() + defer stopper.Stop(ctx) + + stopper, g, _, a, _ := createTestAllocatorWithKnobs(ctx, + 10, + false, /* deterministic */ + &AllocatorTestingKnobs{AllowLeaseTransfersToReplicasNeedingSnapshots: true}, + ) + defer stopper.Stop(context.Background()) + storeList, _, _ := a.storePool.getStoreList(storeFilterThrottled) + + localDesc := *noLocalityStores[len(noLocalityStores)-1] + cfg := TestStoreConfig(nil) + cfg.Gossip = g + cfg.StorePool = a.storePool + cfg.DefaultSpanConfig.NumVoters = 1 + cfg.DefaultSpanConfig.NumReplicas = 1 + s := createTestStoreWithoutStart(ctx, t, stopper, testStoreOpts{createSystemRanges: true}, &cfg) + gossiputil.NewStoreGossiper(cfg.Gossip).GossipStores(noLocalityStores, t) + s.Ident = &roachpb.StoreIdent{StoreID: localDesc.StoreID} + rq := newReplicateQueue(s, a) + rr := newReplicaRankings() + + sr := NewStoreRebalancer(cfg.AmbientCtx, cfg.Settings, rq, rr) + + // Load a fake hot range that's already on the best stores. We want to ensure + // that the store rebalancer doesn't attempt to rebalance ranges that it + // cannot find better rebalance opportunities for. + loadRanges(rr, s, []testRange{{voters: []roachpb.StoreID{localDesc.StoreID}, qps: 100}}) + hottestRanges := rr.topQPS() + sr.chooseRangeToRebalance( + ctx, &hottestRanges, &localDesc, storeList, qpsScorerOptions{qpsRebalanceThreshold: 0.05}, + ) + trace := finishAndGetRecording() + require.Regexpf( + t, "could not find.*opportunities for r1", + trace, "expected the store rebalancer to explicitly ignore r1; but found %s", trace, + ) +} + func TestNoLeaseTransferToBehindReplicas(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/kv/kvserver/testdata/replica_unavailable_error.txt b/pkg/kv/kvserver/testdata/replica_unavailable_error.txt new file mode 100644 index 000000000000..b11beef6d0e1 --- /dev/null +++ b/pkg/kv/kvserver/testdata/replica_unavailable_error.txt @@ -0,0 +1,3 @@ +echo +---- +replicas on non-live nodes: (n2,s20):2 (lost quorum: true): raft status: {"id":"0","term":0,"vote":"0","commit":0,"lead":"0","raftState":"StateFollower","applied":0,"progress":{},"leadtransferee":"0"}: replica r10:‹{a-z}› [(n1,s10):1, (n2,s20):2, next=3, gen=0] of (n1,s10):1 is unavailable diff --git a/pkg/kv/kvserver/testing_knobs.go b/pkg/kv/kvserver/testing_knobs.go index 9ec3168eecb3..8e21061a3373 100644 --- a/pkg/kv/kvserver/testing_knobs.go +++ b/pkg/kv/kvserver/testing_knobs.go @@ -81,6 +81,10 @@ type StoreTestingKnobs struct { // error returned to the client, or to simulate network failures. TestingResponseFilter kvserverbase.ReplicaResponseFilter + // SlowReplicationThresholdOverride is an interceptor that allows setting a + // per-Batch SlowReplicationThreshold. 
+ SlowReplicationThresholdOverride func(ba *roachpb.BatchRequest) time.Duration + // TestingRangefeedFilter is called before a replica processes a rangefeed // in order for unit tests to modify the request, error returned to the client // or data. diff --git a/pkg/kv/util.go b/pkg/kv/util.go index b13fbb790cb0..8d6bfb00f548 100644 --- a/pkg/kv/util.go +++ b/pkg/kv/util.go @@ -15,7 +15,7 @@ import ( "reflect" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/protoutil" diff --git a/pkg/migration/migrations/BUILD.bazel b/pkg/migration/migrations/BUILD.bazel index a1294e6c303a..e9911386e6a4 100644 --- a/pkg/migration/migrations/BUILD.bazel +++ b/pkg/migration/migrations/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "ensure_no_draining_names.go", "insert_missing_public_schema_namespace_entry.go", "migrations.go", + "public_schema_migration.go", "schema_changes.go", "seed_tenant_span_configs.go", ], @@ -23,11 +24,17 @@ go_library( "//pkg/migration", "//pkg/roachpb:with-mocks", "//pkg/security", + "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", + "//pkg/sql/catalog/catalogkv", + "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/catalog/schemadesc", "//pkg/sql/catalog/systemschema", + "//pkg/sql/catalog/tabledesc", + "//pkg/sql/catalog/typedesc", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/util/log", @@ -49,6 +56,7 @@ go_test( "ensure_no_draining_names_external_test.go", "helpers_test.go", "main_test.go", + "public_schema_migration_external_test.go", ], data = glob(["testdata/**"]), embed = [":migrations"], @@ -58,9 +66,11 @@ go_test( "//pkg/jobs", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvserver", "//pkg/security", "//pkg/security/securitytest", "//pkg/server", + "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkv", @@ -73,6 +83,7 @@ go_test( "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/testutils/serverutils", + "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util/leaktest", diff --git a/pkg/migration/migrations/migrations.go b/pkg/migration/migrations/migrations.go index 1b784c7a9810..a2998f5402d4 100644 --- a/pkg/migration/migrations/migrations.go +++ b/pkg/migration/migrations/migrations.go @@ -59,7 +59,7 @@ var migrations = []migration.Migration{ alterSystemStmtDiagReqs, ), migration.NewTenantMigration( - "seed system.span_configurations with configs for existing for existing tenants", + "seed system.span_configurations with configs for existing tenants", toCV(clusterversion.SeedTenantSpanConfigs), NoPrecondition, seedTenantSpanConfigsMigration, @@ -75,6 +75,11 @@ var migrations = []migration.Migration{ NoPrecondition, alterTableProtectedTimestampRecords, ), + migration.NewTenantMigration("update synthetic public schemas to be backed by a descriptor", + toCV(clusterversion.PublicSchemasWithDescriptors), + NoPrecondition, + publicSchemaMigration, + ), } func init() { diff --git a/pkg/migration/migrations/public_schema_migration.go b/pkg/migration/migrations/public_schema_migration.go new file mode 100644 index 000000000000..4986005cd40f --- /dev/null +++ b/pkg/migration/migrations/public_schema_migration.go @@ -0,0 +1,225 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrations + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/migration" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" +) + +func publicSchemaMigration( + ctx context.Context, _ clusterversion.ClusterVersion, d migration.TenantDeps, _ *jobs.Job, +) error { + query := ` + SELECT ns_db.id + FROM system.namespace AS ns_db + INNER JOIN system.namespace + AS ns_sc ON ( + ns_db.id + = ns_sc."parentID" + ) + WHERE ns_db.id != 1 + AND ns_db."parentSchemaID" = 0 + AND ns_db."parentID" = 0 + AND ns_sc."parentSchemaID" = 0 + AND ns_sc.name = 'public' + AND ns_sc.id = 29 +ORDER BY ns_db.id ASC; +` + rows, err := d.InternalExecutor.QueryIterator( + ctx, "get_databases_with_synthetic_public_schemas", nil /* txn */, query, + ) + if err != nil { + return err + } + var databaseIDs []descpb.ID + for ok, err := rows.Next(ctx); ok; ok, err = rows.Next(ctx) { + if err != nil { + return err + } + parentID := descpb.ID(tree.MustBeDInt(rows.Cur()[0])) + databaseIDs = append(databaseIDs, parentID) + } + + for _, dbID := range databaseIDs { + if err := createPublicSchemaForDatabase(ctx, dbID, d); err != nil { + return err + } + } + + return nil +} + +func createPublicSchemaForDatabase( + ctx context.Context, dbID descpb.ID, d migration.TenantDeps, +) error { + return d.CollectionFactory.Txn(ctx, d.InternalExecutor, d.DB, + func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { + return createPublicSchemaDescriptor(ctx, txn, descriptors, dbID, d) + }) +} + +func createPublicSchemaDescriptor( + ctx context.Context, + txn *kv.Txn, + descriptors *descs.Collection, + dbID descpb.ID, + d migration.TenantDeps, +) error { + _, desc, err := descriptors.GetImmutableDatabaseByID(ctx, txn, dbID, tree.DatabaseLookupFlags{Required: true}) + if err != nil { + return err + } + if desc.HasPublicSchemaWithDescriptor() { + // If the database already has a descriptor backed public schema, + // there is no work to be done. 
+ return nil + } + dbDescBuilder := dbdesc.NewBuilder(desc.DatabaseDesc()) + dbDesc := dbDescBuilder.BuildExistingMutableDatabase() + + b := txn.NewBatch() + + publicSchemaDesc, _, err := sql.CreateSchemaDescriptorWithPrivileges( + ctx, d.DB, d.Codec, desc, tree.PublicSchema, security.AdminRoleName(), security.AdminRoleName(), true, /* allocateID */ + ) + if err != nil { + return err + } + publicSchemaID := publicSchemaDesc.GetID() + newKey := catalogkeys.MakeSchemaNameKey(d.Codec, dbID, publicSchemaDesc.GetName()) + oldKey := catalogkeys.EncodeNameKey(d.Codec, catalogkeys.NewNameKeyComponents(dbID, keys.RootNamespaceID, tree.PublicSchema)) + // Remove namespace entry for old public schema. + b.Del(oldKey) + b.CPut(newKey, publicSchemaID, nil) + if err := catalogkv.WriteNewDescToBatch( + ctx, + false, + d.Settings, + b, + d.Codec, + publicSchemaID, + publicSchemaDesc, + ); err != nil { + return err + } + + if dbDesc.Schemas == nil { + dbDesc.Schemas = map[string]descpb.DatabaseDescriptor_SchemaInfo{ + tree.PublicSchema: { + ID: publicSchemaID, + }, + } + } else { + dbDesc.Schemas[tree.PublicSchema] = descpb.DatabaseDescriptor_SchemaInfo{ + ID: publicSchemaID, + } + } + if err := descriptors.WriteDescToBatch(ctx, false, dbDesc, b); err != nil { + return err + } + allDescriptors, err := descriptors.GetAllDescriptors(ctx, txn) + if err != nil { + return err + } + if err := migrateObjectsInDatabase(ctx, dbID, d, txn, publicSchemaID, descriptors, allDescriptors); err != nil { + return err + } + + return txn.Run(ctx, b) +} + +func migrateObjectsInDatabase( + ctx context.Context, + dbID descpb.ID, + d migration.TenantDeps, + txn *kv.Txn, + newPublicSchemaID descpb.ID, + descriptors *descs.Collection, + allDescriptors []catalog.Descriptor, +) error { + const minBatchSizeInBytes = 1 << 20 /* 512 KiB batch size */ + currSize := 0 + var modifiedDescs []catalog.MutableDescriptor + batch := txn.NewBatch() + for _, desc := range allDescriptors { + // Only update descriptors in the parent db and public schema. + if desc.Dropped() || desc.GetParentID() != dbID || + (desc.GetParentSchemaID() != keys.PublicSchemaID && desc.GetParentSchemaID() != descpb.InvalidID) { + continue + } + b := desc.NewBuilder() + updateDesc := func(mut catalog.MutableDescriptor, newPublicSchemaID descpb.ID) { + oldKey := catalogkeys.MakeObjectNameKey(d.Codec, mut.GetParentID(), mut.GetParentSchemaID(), mut.GetName()) + batch.Del(oldKey) + newKey := catalogkeys.MakeObjectNameKey(d.Codec, mut.GetParentID(), newPublicSchemaID, mut.GetName()) + batch.Put(newKey, mut.GetID()) + modifiedDescs = append(modifiedDescs, mut) + } + switch mut := b.BuildExistingMutable().(type) { + case *dbdesc.Mutable, *schemadesc.Mutable: + // Ignore database and schema descriptors. + case *tabledesc.Mutable: + updateDesc(mut, newPublicSchemaID) + mut.UnexposedParentSchemaID = newPublicSchemaID + currSize += mut.Size() + case *typedesc.Mutable: + updateDesc(mut, newPublicSchemaID) + mut.ParentSchemaID = newPublicSchemaID + currSize += mut.Size() + } + + // Once we reach the minimum batch size, write the batch and create a new + // one. 
+ if currSize >= minBatchSizeInBytes { + for _, modified := range modifiedDescs { + err := descriptors.WriteDescToBatch( + ctx, false, modified, batch, + ) + if err != nil { + return err + } + } + if err := txn.Run(ctx, batch); err != nil { + return err + } + currSize = 0 + batch = txn.NewBatch() + modifiedDescs = make([]catalog.MutableDescriptor, 0) + } + } + for _, modified := range modifiedDescs { + err := descriptors.WriteDescToBatch( + ctx, false, modified, batch, + ) + if err != nil { + return err + } + } + return txn.Run(ctx, batch) +} diff --git a/pkg/migration/migrations/public_schema_migration_external_test.go b/pkg/migration/migrations/public_schema_migration_external_test.go new file mode 100644 index 000000000000..7a1eac0b03f7 --- /dev/null +++ b/pkg/migration/migrations/public_schema_migration_external_test.go @@ -0,0 +1,214 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrations_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/stretchr/testify/require" +) + +func publicSchemaMigrationTest(t *testing.T, ctx context.Context, numTables int) { + settings := cluster.MakeTestingClusterSettingsWithVersions( + clusterversion.TestingBinaryVersion, + clusterversion.ByKey(clusterversion.PublicSchemasWithDescriptors-1), + false, + ) + // 2048 KiB batch size - 4x the public schema migration's minBatchSizeInBytes. + const maxCommandSize = 1 << 22 + kvserver.MaxCommandSize.Override(ctx, &settings.SV, maxCommandSize) + tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ + ServerArgs: base.TestServerArgs{ + Settings: settings, + Knobs: base.TestingKnobs{ + Server: &server.TestingKnobs{ + DisableAutomaticVersionUpgrade: 1, + BinaryVersionOverride: clusterversion.ByKey(clusterversion.PublicSchemasWithDescriptors - 1), + }, + }, + }, + }) + defer tc.Stopper().Stop(ctx) + + db := tc.ServerConn(0) + defer db.Close() + + // We bootstrap the cluster on the older version where databases are + // created without public schemas. The namespace before upgrading looks like: + /* + 51 0 public 29 + 50 0 public 29 + 0 0 defaultdb 50 + 0 0 postgres 51 + 50 29 t 52 + 50 29 typ 53 + 50 29 _typ 54 + */ + _, err := db.Exec(`CREATE TABLE defaultdb.public.t(x INT)`) + require.NoError(t, err) + _, err = db.Exec(`INSERT INTO defaultdb.public.t VALUES (1), (2), (3)`) + require.NoError(t, err) + _, err = db.Exec(`CREATE TYPE defaultdb.public.typ AS ENUM()`) + require.NoError(t, err) + // Ensure the migration works if we have UDS in the database. 
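The descriptor rewrite loop in migrateObjectsInDatabase above batches its writes by accumulated descriptor size: descriptors are collected until minBatchSizeInBytes is reached, the batch is written and committed, and a fresh batch is started, with a final flush for whatever remains. A standalone sketch of that control flow (sizes and names are illustrative, and the real code writes through descs.Collection and kv batches rather than a callback):

```
package main

import "fmt"

// desc stands in for a rewritten descriptor together with its encoded size.
type desc struct {
	name string
	size int
}

// flushBySize accumulates items until their combined size reaches
// minBatchBytes, flushes the batch, and starts a new one, flushing any
// remainder at the end. This mirrors the control flow of the migration's
// descriptor loop, not its actual types or writes.
func flushBySize(items []desc, minBatchBytes int, flush func([]desc)) {
	var batch []desc
	cur := 0
	for _, it := range items {
		batch = append(batch, it)
		cur += it.size
		if cur >= minBatchBytes {
			flush(batch)
			batch = nil
			cur = 0
		}
	}
	if len(batch) > 0 {
		flush(batch) // final flush, like the trailing txn.Run above
	}
}

func main() {
	items := []desc{{"t1", 300}, {"t2", 400}, {"t3", 200}, {"t4", 500}}
	flushBySize(items, 512, func(b []desc) {
		fmt.Printf("flushing %d descriptors\n", len(b))
	})
	// Output:
	// flushing 2 descriptors
	// flushing 2 descriptors
}
```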
+ _, err = db.Exec(`CREATE SCHEMA defaultdb.s`) + require.NoError(t, err) + _, err = db.Exec(`CREATE TABLE defaultdb.s.table_in_uds(x INT)`) + require.NoError(t, err) + _, err = db.Exec(`INSERT INTO defaultdb.s.table_in_uds VALUES (1), (2), (3)`) + require.NoError(t, err) + + // Create large descriptors to ensure we're batching descriptors. + // The name of the table is approx 1000 bytes. + // Thus, we create approximately 5000 KiB of descriptors in this database. + // This is also larger than the 2048 KiB max command size we set. + // The batch size in the migration is 512 KiB so this ensures we have at + // least two batches. + for i := 0; i < numTables; i++ { + _, err = db.Exec(fmt.Sprintf(`CREATE TABLE defaultdb.t%s%d()`, strings.Repeat("x", 10000), i)) + require.NoError(t, err) + } + + _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + clusterversion.ByKey(clusterversion.PublicSchemasWithDescriptors).String()) + require.NoError(t, err) + + // Verify that defaultdb and postgres have public schemas with IDs that + // are not 29. + row := db.QueryRow(`SELECT id FROM system.namespace WHERE name='public' AND "parentID"=50`) + require.NotNil(t, row) + var defaultDBPublicSchemaID int + err = row.Scan(&defaultDBPublicSchemaID) + require.NoError(t, err) + + require.NotEqual(t, defaultDBPublicSchemaID, keys.PublicSchemaID) + + row = db.QueryRow(`SELECT id FROM system.namespace WHERE name='public' AND "parentID"=51`) + require.NotNil(t, row) + var postgresPublicSchemaID int + err = row.Scan(&postgresPublicSchemaID) + require.NoError(t, err) + + require.NotEqual(t, postgresPublicSchemaID, keys.PublicSchemaID) + + // Verify that table "t" and type "typ" and "_typ" are have parent schema id + // defaultDBPublicSchemaID. + var tParentSchemaID, typParentSchemaID, typArrParentSchemaID int + row = db.QueryRow(`SELECT "parentSchemaID" FROM system.namespace WHERE name='t' AND "parentID"=50`) + err = row.Scan(&tParentSchemaID) + require.NoError(t, err) + + require.Equal(t, tParentSchemaID, defaultDBPublicSchemaID) + + row = db.QueryRow(`SELECT "parentSchemaID" FROM system.namespace WHERE name='typ' AND "parentID"=50`) + err = row.Scan(&typParentSchemaID) + require.NoError(t, err) + + require.Equal(t, typParentSchemaID, defaultDBPublicSchemaID) + + row = db.QueryRow(`SELECT "parentSchemaID" FROM system.namespace WHERE name='_typ' AND "parentID"=50`) + err = row.Scan(&typArrParentSchemaID) + require.NoError(t, err) + + require.Equal(t, typArrParentSchemaID, defaultDBPublicSchemaID) + + _, err = db.Exec(`INSERT INTO t VALUES (4)`) + require.NoError(t, err) + + rows, err := db.Query(`SELECT * FROM defaultdb.t ORDER BY x`) + require.NoError(t, err) + defer rows.Close() + if err != nil { + t.Fatal(err) + } + + // Verify that we can query table t. + var x int + for i := 1; i < 5; i++ { + rows.Next() + require.NoError(t, err) + err = rows.Scan(&x) + require.NoError(t, err) + require.Equal(t, x, i) + } + + // Verify that we can use type "typ". + _, err = db.Exec(`CREATE TABLE t2(x typ)`) + require.NoError(t, err) + + // Verify that we can use the typ / enum. 
+ _, err = db.Exec(`ALTER TYPE typ ADD VALUE 'hello'`) + require.NoError(t, err) + + _, err = db.Exec(`INSERT INTO t2 VALUES ('hello')`) + require.NoError(t, err) + + row = db.QueryRow(`SELECT * FROM t2`) + require.NotNil(t, row) + + var helloStr string + err = row.Scan(&helloStr) + require.NoError(t, err) + + require.Equal(t, "hello", helloStr) + + rows, err = db.Query(`SELECT * FROM defaultdb.s.table_in_uds ORDER BY x`) + require.NoError(t, err) + + // Verify that we can query table defaultdb.s.table_in_uds (table in a UDS). + for i := 1; i < 4; i++ { + rows.Next() + require.NoError(t, err) + err = rows.Scan(&x) + require.NoError(t, err) + require.Equal(t, x, i) + } + + // Verify that the tables with large descriptor sizes have parentSchemaIDs + // that are not 29. + const oldPublicSchemaID = 29 + var parentSchemaID int + for i := 0; i < numTables; i++ { + row = db.QueryRow(fmt.Sprintf(`SELECT "parentSchemaID" FROM system.namespace WHERE name = 't%s%d'`, strings.Repeat("x", 10000), i)) + err = row.Scan(&parentSchemaID) + require.NoError(t, err) + require.NotEqual(t, parentSchemaID, descpb.InvalidID) + require.NotEqual(t, oldPublicSchemaID, parentSchemaID) + } +} + +func TestPublicSchemaMigration500Tables(t *testing.T) { + skip.UnderRace(t, "takes >1min under race") + defer leaktest.AfterTest(t)() + ctx := context.Background() + + publicSchemaMigrationTest(t, ctx, 500) +} + +func TestPublicSchemaMigration10Tables(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + publicSchemaMigrationTest(t, ctx, 10) +} diff --git a/pkg/roachpb/BUILD.bazel b/pkg/roachpb/BUILD.bazel index 6c668ee214d7..83b6d264012a 100644 --- a/pkg/roachpb/BUILD.bazel +++ b/pkg/roachpb/BUILD.bazel @@ -53,7 +53,7 @@ go_library( "//pkg/util/protoutil", "//pkg/util/timetz", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//errorspb", "@com_github_cockroachdb_errors//extgrpc", @@ -108,7 +108,7 @@ go_library( "//pkg/util/uuid", "@com_github_aws_aws_sdk_go//aws", "@com_github_aws_aws_sdk_go//aws/credentials", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//errorspb", "@com_github_cockroachdb_errors//extgrpc", @@ -159,6 +159,7 @@ go_test( "version_test.go", ], embed = [":with-mocks"], # keep + tags = ["no-remote"], deps = [ "//pkg/cli/exit", "//pkg/kv/kvserver/concurrency/lock", @@ -176,7 +177,7 @@ go_test( "//pkg/util/randutil", "//pkg/util/timeutil", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_gogo_protobuf//proto", diff --git a/pkg/roachpb/api.go b/pkg/roachpb/api.go index 314019c38384..dcc644164d19 100644 --- a/pkg/roachpb/api.go +++ b/pkg/roachpb/api.go @@ -1677,3 +1677,19 @@ func (s *ScanStats) SafeFormat(w redact.SafePrinter, _ rune) { func (s *ScanStats) String() string { return redact.StringWithoutMarkers(s) } + +// TenantSettingsPrecedence identifies the precedence of a set of setting +// overrides. It is used by the TenantSettings API which supports passing +// multiple overrides for the same setting. +type TenantSettingsPrecedence uint32 + +const ( + // SpecificTenantOverrides is the high precedence for tenant setting overrides. + // These overrides take precedence over AllTenantsOverrides. 
+ SpecificTenantOverrides TenantSettingsPrecedence = 1 + iota + + // AllTenantsOverrides is the low precedence for tenant setting overrides. + // These overrides are only effectual for a tenant if there is no override + // with the SpecificTenantOverrides precedence.. + AllTenantsOverrides +) diff --git a/pkg/roachpb/api.proto b/pkg/roachpb/api.proto index d3a4556679d6..eb8e377b5c02 100644 --- a/pkg/roachpb/api.proto +++ b/pkg/roachpb/api.proto @@ -2610,6 +2610,60 @@ message GossipSubscriptionEvent { kv.kvpb.Error error = 4; } +// TenantSettingsRequest establishes an indefinite stream that provides +// up-to-date overrides for tenant settings. +// +// Upon establishment of the stream, the current overrides are returned as an +// event, and any time the overrides change a new event is generated. +message TenantSettingsRequest { + TenantID tenant_id = 1 [(gogoproto.customname) = "TenantID", (gogoproto.nullable) = false]; +} + +// TenantSettingsEvent is used to report changes to setting overrides for a +// particular tenant. +// +// Each event pertains to a certain precedence value (see +// TenantSettingsPrecedence). +// +// Note: this API is designed to allow flexibility of implementation on the +// server side (e.g. to make it maintain very little state per tenant). +message TenantSettingsEvent { + // Precedence must be a valid TenantSettingsPrecedence value. + uint32 precedence = 1 [(gogoproto.casttype) = "TenantSettingsPrecedence"]; + + // Incremental is true if the list of overrides is a list of changes since the + // last event. In that case, any overrides that have been removed are returned + // as TenantSettings with empty RawValue and ValueType fields. + // + // When Incremental is false, the overrides contains the complete list of + // current overrides for this precedence. + // + // The first event for a precedence is never incremental. + bool incremental = 2; + + // Overrides contains: + // - all current setting overrides for the given precedence if Incremental is + // false; or + // - the changed overrides since the last event for the precedence if + // Incremental is true (removed overrides have empty RawValue and ValueType + // fields). + repeated TenantSetting overrides = 3; + + // If non-nil, the other fields will be empty and this will be the final event + // sent on the stream before it is terminated. + errorspb.EncodedError error = 4 [(gogoproto.nullable) = false]; +} + +// TenantSetting contains the name and value of a tenant setting. +// +// The value representation is the same as that used by the system.settings +// table (value and valueType columns). +message TenantSetting { + string name = 1; + string raw_value = 2; + string value_type = 3; +} + // TenantConsumption contains information about resource utilization by a // tenant, which directly factors into their bill. message TenantConsumption { @@ -2743,6 +2797,10 @@ service Internal { // UpdateSpanConfigs is used to update the span configurations over given // keyspans. rpc UpdateSpanConfigs (UpdateSpanConfigsRequest) returns (UpdateSpanConfigsResponse) { } + + // TenantSettings is used by tenants to obtain and stay up to date with tenant + // setting overrides. 
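The TenantSettingsEvent contract described above (the first event per precedence is a full snapshot, later events may be incremental, and a removal is encoded as an override with empty RawValue and ValueType) implies a small amount of client-side state per precedence. A sketch under those assumptions, using illustrative setting names instead of the generated proto types:

```
package main

import "fmt"

// setting mirrors the TenantSetting message: name plus raw value and value
// type; an override with both value fields empty denotes a removal in an
// incremental event.
type setting struct {
	name      string
	rawValue  string
	valueType string
}

// overrides holds the current overrides for a single precedence level
// (specific-tenant or all-tenants).
type overrides map[string]setting

// apply consumes one event for this precedence: a full snapshot replaces the
// map, while an incremental event mutates it in place.
func (o overrides) apply(incremental bool, changes []setting) overrides {
	if !incremental {
		o = overrides{}
	}
	for _, s := range changes {
		if s.rawValue == "" && s.valueType == "" {
			delete(o, s.name)
			continue
		}
		o[s.name] = s
	}
	return o
}

func main() {
	cur := overrides{}
	// The first event for a precedence is never incremental: full snapshot.
	cur = cur.apply(false, []setting{{"some.setting", "5s", "d"}})
	fmt.Println(len(cur)) // 1
	// A later incremental event removes the override.
	cur = cur.apply(true, []setting{{"some.setting", "", ""}})
	fmt.Println(len(cur)) // 0
}
```

When both precedence levels carry an override for the same setting, the SpecificTenantOverrides entry wins, per the TenantSettingsPrecedence ordering above.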
+ rpc TenantSettings (TenantSettingsRequest) returns (stream TenantSettingsEvent) { } } // ContentionEvent is a message that will be attached to BatchResponses diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index 36e1fbc1877b..35a353f78536 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -26,7 +26,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -1865,12 +1865,12 @@ func (s LeaseSequence) SafeValue() {} var _ fmt.Stringer = &Lease{} -func (l *Lease) String() string { +func (l Lease) String() string { return redact.StringWithoutMarkers(l) } // SafeFormat implements the redact.SafeFormatter interface. -func (l *Lease) SafeFormat(w redact.SafePrinter, _ rune) { +func (l Lease) SafeFormat(w redact.SafePrinter, _ rune) { if l.Empty() { w.SafeString("") return @@ -2528,5 +2528,5 @@ func (ReplicaChangeType) SafeValue() {} func (ri RangeInfo) String() string { return fmt.Sprintf("desc: %s, lease: %s, closed_timestamp_policy: %s", - ri.Desc, &ri.Lease, ri.ClosedTimestampPolicy) + ri.Desc, ri.Lease, ri.ClosedTimestampPolicy) } diff --git a/pkg/roachpb/data_test.go b/pkg/roachpb/data_test.go index f38d012cb49c..4b71df693ad3 100644 --- a/pkg/roachpb/data_test.go +++ b/pkg/roachpb/data_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/cli/exit" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils/zerofields" diff --git a/pkg/roachpb/errors.go b/pkg/roachpb/errors.go index 7e97fc659789..7ecb8f742743 100644 --- a/pkg/roachpb/errors.go +++ b/pkg/roachpb/errors.go @@ -508,7 +508,7 @@ func (e *LeaseRejectedError) Error() string { } func (e *LeaseRejectedError) message(_ *Error) string { - return fmt.Sprintf("cannot replace lease %s with %s: %s", &e.Existing, e.Requested.String(), e.Message) + return fmt.Sprintf("cannot replace lease %s with %s: %s", e.Existing, e.Requested.String(), e.Message) } var _ ErrorDetailInterface = &LeaseRejectedError{} @@ -622,7 +622,7 @@ func (e *RangeKeyMismatchError) AppendRangeInfo( ) { if !l.Empty() { if _, ok := desc.GetReplicaDescriptorByID(l.Replica.ReplicaID); !ok { - log.Fatalf(ctx, "lease names missing replica; lease: %s, desc: %s", &l, desc) + log.Fatalf(ctx, "lease names missing replica; lease: %s, desc: %s", l, desc) } } e.Ranges = append(e.Ranges, RangeInfo{ @@ -650,6 +650,9 @@ func (e *AmbiguousResultError) Error() string { } func (e *AmbiguousResultError) message(_ *Error) string { + if e.WrappedErr != nil { + return fmt.Sprintf("result is ambiguous (%v)", e.WrappedErr) + } return fmt.Sprintf("result is ambiguous (%s)", e.Message) } @@ -888,11 +891,18 @@ var _ ErrorDetailInterface = &WriteIntentError{} // NewWriteTooOldError creates a new write too old error. The function accepts // the timestamp of the operation that hit the error, along with the timestamp // immediately after the existing write which had a higher timestamp and which -// caused the error. -func NewWriteTooOldError(operationTS, actualTS hlc.Timestamp) *WriteTooOldError { +// caused the error. An optional Key parameter is accepted to denote one key +// where this error was encountered. 
+func NewWriteTooOldError(operationTS, actualTS hlc.Timestamp, key Key) *WriteTooOldError { + if len(key) > 0 { + oldKey := key + key = make([]byte, len(oldKey)) + copy(key, oldKey) + } return &WriteTooOldError{ Timestamp: operationTS, ActualTimestamp: actualTS, + Key: key, } } @@ -901,6 +911,10 @@ func (e *WriteTooOldError) Error() string { } func (e *WriteTooOldError) message(_ *Error) string { + if len(e.Key) > 0 { + return fmt.Sprintf("WriteTooOldError: write for key %s at timestamp %s too old; wrote at %s", + e.Key, e.Timestamp, e.ActualTimestamp) + } return fmt.Sprintf("WriteTooOldError: write at timestamp %s too old; wrote at %s", e.Timestamp, e.ActualTimestamp) } diff --git a/pkg/roachpb/errors.proto b/pkg/roachpb/errors.proto index c9a24684f7ee..433900af702c 100644 --- a/pkg/roachpb/errors.proto +++ b/pkg/roachpb/errors.proto @@ -332,6 +332,13 @@ message WriteIntentError { message WriteTooOldError { optional util.hlc.Timestamp timestamp = 1 [(gogoproto.nullable) = false]; optional util.hlc.Timestamp actual_timestamp = 2 [(gogoproto.nullable) = false]; + // One of the keys at which this error was encountered. There's + // no need to return new WriteTooOldErrors for each colliding key; the key + // is just present for investigation / logging purposes, and is not expected + // to be used in any transaction logic. As a result, it's not even necessary + // for this key to be at actual_timestamp; it could be at any timestamp in + // between timestamp and actual_timestamp. + optional bytes key = 3 [(gogoproto.casttype) = "Key"]; } // An OpRequiresTxnError indicates that a command required to be diff --git a/pkg/roachpb/mocks_generated.go b/pkg/roachpb/mocks_generated.go index 7344ee7bb366..1886c2e503dc 100644 --- a/pkg/roachpb/mocks_generated.go +++ b/pkg/roachpb/mocks_generated.go @@ -176,6 +176,26 @@ func (mr *MockInternalClientMockRecorder) ResetQuorum(arg0, arg1 interface{}, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetQuorum", reflect.TypeOf((*MockInternalClient)(nil).ResetQuorum), varargs...) } +// TenantSettings mocks base method. +func (m *MockInternalClient) TenantSettings(arg0 context.Context, arg1 *TenantSettingsRequest, arg2 ...grpc.CallOption) (Internal_TenantSettingsClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "TenantSettings", varargs...) + ret0, _ := ret[0].(Internal_TenantSettingsClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TenantSettings indicates an expected call of TenantSettings. +func (mr *MockInternalClientMockRecorder) TenantSettings(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantSettings", reflect.TypeOf((*MockInternalClient)(nil).TenantSettings), varargs...) +} + // TokenBucket mocks base method. 
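The new WriteTooOldError constructor above makes a defensive copy of the optional key before storing it, presumably so the error does not alias a buffer the caller may later reuse. A tiny standalone illustration of the aliasing problem such a copy avoids (not the roachpb code):

```
package main

import "fmt"

// copyKey makes the same kind of defensive copy as the constructor above:
// the returned slice no longer shares a backing array with the input.
func copyKey(key []byte) []byte {
	if len(key) == 0 {
		return nil
	}
	out := make([]byte, len(key))
	copy(out, key)
	return out
}

func main() {
	buf := []byte("a")
	aliased := buf         // shares buf's backing array
	copied := copyKey(buf) // independent copy
	buf[0] = 'z'           // caller reuses its buffer later
	fmt.Println(string(aliased), string(copied)) // prints "z a"
}
```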
func (m *MockInternalClient) TokenBucket(arg0 context.Context, arg1 *TokenBucketRequest, arg2 ...grpc.CallOption) (*TokenBucketResponse, error) { m.ctrl.T.Helper() diff --git a/pkg/roachpb/string_test.go b/pkg/roachpb/string_test.go index a6114a472162..87854fd38a1c 100644 --- a/pkg/roachpb/string_test.go +++ b/pkg/roachpb/string_test.go @@ -142,39 +142,3 @@ func TestSpansString(t *testing.T) { require.Equal(t, tc.expected, tc.spans.String()) } } - -func TestLeaseString(t *testing.T) { - for _, tc := range []struct { - lease *roachpb.Lease - expected string - }{ - { - lease: &roachpb.Lease{}, - expected: "", - }, - { - lease: nil, - expected: "", - }, - { - lease: &roachpb.Lease{ - Replica: roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 1}, - Sequence: 1, - Start: hlc.ClockTimestamp(hlc.Timestamp{WallTime: 12, Logical: 123}), - Expiration: &hlc.Timestamp{WallTime: 1234, Logical: 12345}, - }, - expected: "repl=(n1,s1):? seq=1 start=0.000000012,123 exp=0.000001234,12345", - }, - { - lease: &roachpb.Lease{ - Replica: roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 1}, - Sequence: 1, - Start: hlc.ClockTimestamp(hlc.Timestamp{WallTime: 12, Logical: 123}), - Epoch: 1, - }, - expected: "repl=(n1,s1):? seq=1 start=0.000000012,123 epo=1", - }, - } { - require.Equal(t, tc.expected, tc.lease.String()) - } -} diff --git a/pkg/rpc/context.go b/pkg/rpc/context.go index 3161b866fc14..63429c10d33c 100644 --- a/pkg/rpc/context.go +++ b/pkg/rpc/context.go @@ -683,6 +683,7 @@ func (a internalClientAdapter) RangeFeed( ctx context.Context, args *roachpb.RangeFeedRequest, _ ...grpc.CallOption, ) (roachpb.Internal_RangeFeedClient, error) { ctx, cancel := context.WithCancel(ctx) + ctx, sp := tracing.ChildSpan(ctx, "/cockroach.roachpb.Internal/RangeFeed") rfAdapter := rangeFeedClientAdapter{ respStreamClientAdapter: makeRespStreamClientAdapter(ctx), } @@ -691,6 +692,7 @@ func (a internalClientAdapter) RangeFeed( args.AdmissionHeader.SourceLocation = roachpb.AdmissionHeader_LOCAL go func() { defer cancel() + defer sp.Finish() err := a.server.RangeFeed(args, rfAdapter) if err == nil { err = io.EOF @@ -727,12 +729,14 @@ func (a internalClientAdapter) GossipSubscription( ctx context.Context, args *roachpb.GossipSubscriptionRequest, _ ...grpc.CallOption, ) (roachpb.Internal_GossipSubscriptionClient, error) { ctx, cancel := context.WithCancel(ctx) + ctx, sp := tracing.ChildSpan(ctx, "/cockroach.roachpb.Internal/GossipSubscription") gsAdapter := gossipSubscriptionClientAdapter{ respStreamClientAdapter: makeRespStreamClientAdapter(ctx), } go func() { defer cancel() + defer sp.Finish() err := a.server.GossipSubscription(args, gsAdapter) if err == nil { err = io.EOF @@ -743,6 +747,48 @@ func (a internalClientAdapter) GossipSubscription( return gsAdapter, nil } +type tenantSettingsClientAdapter struct { + respStreamClientAdapter +} + +// roachpb.Internal_TenantSettingsServer methods. +func (a tenantSettingsClientAdapter) Recv() (*roachpb.TenantSettingsEvent, error) { + e, err := a.recvInternal() + if err != nil { + return nil, err + } + return e.(*roachpb.TenantSettingsEvent), nil +} + +// roachpb.Internal_TenantSettingsServer methods. +func (a tenantSettingsClientAdapter) Send(e *roachpb.TenantSettingsEvent) error { + return a.sendInternal(e) +} + +var _ roachpb.Internal_TenantSettingsClient = tenantSettingsClientAdapter{} +var _ roachpb.Internal_TenantSettingsServer = tenantSettingsClientAdapter{} + +// TenantSettings is part of the roachpb.InternalClient interface. 
+func (a internalClientAdapter) TenantSettings( + ctx context.Context, args *roachpb.TenantSettingsRequest, _ ...grpc.CallOption, +) (roachpb.Internal_TenantSettingsClient, error) { + ctx, cancel := context.WithCancel(ctx) + gsAdapter := tenantSettingsClientAdapter{ + respStreamClientAdapter: makeRespStreamClientAdapter(ctx), + } + + go func() { + defer cancel() + err := a.server.TenantSettings(args, gsAdapter) + if err == nil { + err = io.EOF + } + gsAdapter.errC <- err + }() + + return gsAdapter, nil +} + var _ roachpb.InternalClient = internalClientAdapter{} // IsLocal returns true if the given InternalClient is local. diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go index 18c713296327..1b0a76878aac 100644 --- a/pkg/rpc/context_test.go +++ b/pkg/rpc/context_test.go @@ -276,6 +276,12 @@ func (*internalServer) UpdateSpanConfigs( panic("unimplemented") } +func (*internalServer) TenantSettings( + *roachpb.TenantSettingsRequest, roachpb.Internal_TenantSettingsServer, +) error { + panic("unimplemented") +} + // TestInternalServerAddress verifies that RPCContext uses AdvertiseAddr, not Addr, to // determine whether to apply the local server optimization. // diff --git a/pkg/rpc/nodedialer/nodedialer_test.go b/pkg/rpc/nodedialer/nodedialer_test.go index 1daace66c895..9f9eb5db8e3b 100644 --- a/pkg/rpc/nodedialer/nodedialer_test.go +++ b/pkg/rpc/nodedialer/nodedialer_test.go @@ -597,3 +597,9 @@ func (*internalServer) UpdateSpanConfigs( ) (*roachpb.UpdateSpanConfigsResponse, error) { panic("unimplemented") } + +func (*internalServer) TenantSettings( + *roachpb.TenantSettingsRequest, roachpb.Internal_TenantSettingsServer, +) error { + panic("unimplemented") +} diff --git a/pkg/security/certificate_loader.go b/pkg/security/certificate_loader.go index ee51b2d06723..653c1b5e0b49 100644 --- a/pkg/security/certificate_loader.go +++ b/pkg/security/certificate_loader.go @@ -95,6 +95,8 @@ const ( ClientPem // TenantPem describes a SQL tenant client certificate. TenantPem + // TenantSigningPem describes a SQL tenant signing certificate. + TenantSigningPem // Maximum allowable permissions. maxKeyPermissions os.FileMode = 0700 @@ -234,6 +236,13 @@ func CertInfoFromFilename(filename string) (*CertInfo, error) { if len(name) == 0 { return nil, errors.Errorf("tenant certificate filename should match client-tenant.%s", certExtension) } + case `tenant-signing`: + fileUsage = TenantSigningPem + // Strip prefix and suffix and re-join middle parts. + name = strings.Join(parts[1:numParts-1], `.`) + if len(name) == 0 { + return nil, errors.Errorf("tenant signing certificate filename should match tenant-signing.%s", certExtension) + } default: return nil, errors.Errorf("unknown prefix %q", prefix) } diff --git a/pkg/security/certificate_manager.go b/pkg/security/certificate_manager.go index c84afebf5853..1d8c244a25e5 100644 --- a/pkg/security/certificate_manager.go +++ b/pkg/security/certificate_manager.go @@ -126,7 +126,7 @@ type CertificateManager struct { clientCerts map[SQLUsername]*CertInfo // Certs only used with multi-tenancy. - tenantCACert, tenantCert *CertInfo + tenantCACert, tenantCert, tenantSigningCert *CertInfo // TLS configs. Initialized lazily. Wiped on every successful Load(). // Server-side config. @@ -401,6 +401,26 @@ func TenantKeyFilename(tenantIdentifier string) string { return "client-tenant." + tenantIdentifier + keyExtension } +// TenantSigningCertPath returns the expected file path for the node certificate. 
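Like the RangeFeed and GossipSubscription adapters above, the local TenantSettings path bypasses gRPC entirely: the server-side handler runs in a goroutine, Send/Recv are bridged through `respStreamClientAdapter`, and a clean handler return is surfaced to the client as `io.EOF`. A stripped-down sketch of that pattern, with hypothetical `event` and `streamAdapter` types standing in for the roachpb and rpc types:

```go
package main

import (
	"fmt"
	"io"
)

// event stands in for roachpb.TenantSettingsEvent.
type event struct{ payload string }

// streamAdapter plays the role of respStreamClientAdapter: it carries events
// from the in-process "server" goroutine to the client's Recv calls, plus the
// final error once the handler returns.
type streamAdapter struct {
	eventC chan *event
	errC   chan error
}

func (s streamAdapter) Send(e *event) error { s.eventC <- e; return nil }

func (s streamAdapter) Recv() (*event, error) {
	select {
	case e := <-s.eventC:
		return e, nil
	case err := <-s.errC:
		return nil, err
	}
}

// handler stands in for the server-side streaming RPC implementation.
func handler(stream streamAdapter) error {
	for i := 0; i < 3; i++ {
		if err := stream.Send(&event{payload: fmt.Sprintf("event-%d", i)}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	adapter := streamAdapter{eventC: make(chan *event), errC: make(chan error, 1)}
	go func() {
		err := handler(adapter)
		if err == nil {
			err = io.EOF // a clean handler return becomes end-of-stream for the client
		}
		adapter.errC <- err
	}()
	for {
		e, err := adapter.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(e.payload)
	}
}
```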
+func (cl CertsLocator) TenantSigningCertPath(tenantIdentifier string) string { + return filepath.Join(cl.certsDir, TenantSigningCertFilename(tenantIdentifier)) +} + +// TenantSigningCertFilename returns the expected file name for the node certificate. +func TenantSigningCertFilename(tenantIdentifier string) string { + return "tenant-signing." + tenantIdentifier + certExtension +} + +// TenantSigningKeyPath returns the expected file path for the node key. +func (cl CertsLocator) TenantSigningKeyPath(tenantIdentifier string) string { + return filepath.Join(cl.certsDir, TenantSigningKeyFilename(tenantIdentifier)) +} + +// TenantSigningKeyFilename returns the expected file name for the node key. +func TenantSigningKeyFilename(tenantIdentifier string) string { + return "tenant-signing." + tenantIdentifier + keyExtension +} + // ClientCertPath returns the expected file path for the user's certificate. func (cl CertsLocator) ClientCertPath(user SQLUsername) string { return filepath.Join(cl.certsDir, ClientCertFilename(user)) @@ -601,7 +621,7 @@ func (cm *CertificateManager) LoadCertificates() error { } var caCert, clientCACert, uiCACert, nodeCert, uiCert, nodeClientCert *CertInfo - var tenantCACert, tenantCert *CertInfo + var tenantCACert, tenantCert, tenantSigningCert *CertInfo clientCerts := make(map[SQLUsername]*CertInfo) for _, ci := range cl.Certificates() { switch ci.FileUsage { @@ -624,6 +644,17 @@ func (cm *CertificateManager) LoadCertificates() error { if tenantID == cm.tenantIdentifier { tenantCert = ci } + case TenantSigningPem: + // When there are multiple tenant signing certs, pick the one we need only. + // In practice, this is expected only during testing, when we share a certs + // dir between multiple tenants. + tenantID, err := strconv.ParseUint(ci.Name, 10, 64) + if err != nil { + return errors.Errorf("invalid tenant id %s", ci.Name) + } + if tenantID == cm.tenantIdentifier { + tenantSigningCert = ci + } case TenantCAPem: tenantCACert = ci case UIPem: @@ -701,6 +732,7 @@ func (cm *CertificateManager) LoadCertificates() error { cm.tenantConfig = nil cm.tenantCACert = tenantCACert cm.tenantCert = tenantCert + cm.tenantSigningCert = tenantSigningCert cm.updateMetricsLocked() return nil @@ -1039,6 +1071,18 @@ func (cm *CertificateManager) GetTenantTLSConfig() (*tls.Config, error) { return cfg, nil } +// GetTenantSigningCert returns the most up-to-date tenant signing certificate. +func (cm *CertificateManager) GetTenantSigningCert() (*CertInfo, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + + c := cm.tenantSigningCert + if err := checkCertIsValid(c); err != nil { + return nil, makeError(err, "problem with tenant signing certificate") + } + return c, nil +} + // GetClientTLSConfig returns the most up-to-date client tls.Config. // Returns the dual-purpose node certs if user == NodeUser and there is no // separate client cert for 'node'. diff --git a/pkg/security/certs.go b/pkg/security/certs.go index 53d29665a35d..926bfa6a54d3 100644 --- a/pkg/security/certs.go +++ b/pkg/security/certs.go @@ -14,6 +14,7 @@ import ( "bytes" "context" "crypto" + "crypto/ed25519" "crypto/rand" "crypto/rsa" "crypto/tls" @@ -542,6 +543,58 @@ func WriteTenantPair(certsDir string, cp *TenantPair, overwrite bool) error { return nil } +// CreateTenantSigningPair creates a tenant signing pair. The private key and +// public key are both created in certsDir. 
+func CreateTenantSigningPair( + certsDir string, lifetime time.Duration, overwrite bool, tenantID uint64, +) error { + if len(certsDir) == 0 { + return errors.New("the path to the certs directory is required") + } + if tenantID == 0 { + return errors.Errorf("tenantId %d is invalid (requires != 0)", tenantID) + } + + tenantIdentifier := fmt.Sprintf("%d", tenantID) + + // Create a certificate manager with "create dir if not exist". + cm, err := NewCertificateManagerFirstRun(certsDir, CommandTLSSettings{}) + if err != nil { + return err + } + + signingKeyPath := cm.TenantSigningKeyPath(tenantIdentifier) + signingCertPath := cm.TenantSigningCertPath(tenantIdentifier) + var pubKey crypto.PublicKey + var privKey crypto.PrivateKey + pubKey, privKey, err = ed25519.GenerateKey(rand.Reader) + if err != nil { + return errors.Wrap(err, "could not generate new tenant signing key") + } + + if err := writeKeyToFile(signingKeyPath, privKey, overwrite); err != nil { + return errors.Wrapf(err, "could not write tenant signing key to file %s", signingKeyPath) + } + + log.Infof(context.Background(), "generated tenant signing key %s", signingKeyPath) + + // Generate certificate. + certContents, err := GenerateTenantSigningCert(pubKey, privKey, lifetime, tenantID) + if err != nil { + return errors.Wrap(err, "could not generate tenant signing certificate") + } + + certificates := []*pem.Block{{Type: "CERTIFICATE", Bytes: certContents}} + + if err := WritePEMToFile(signingCertPath, certFileMode, overwrite, certificates...); err != nil { + return errors.Wrapf(err, "could not write tenant signing certificate file %s", signingCertPath) + } + + log.Infof(context.Background(), "wrote certificate to %s", signingCertPath) + + return nil +} + // PEMContentsToX509 takes raw pem-encoded contents and attempts to parse into // x509.Certificate objects. func PEMContentsToX509(contents []byte) ([]*x509.Certificate, error) { diff --git a/pkg/security/certs_tenant_test.go b/pkg/security/certs_tenant_test.go index 49f74b079f6c..b90b244883f5 100644 --- a/pkg/security/certs_tenant_test.go +++ b/pkg/security/certs_tenant_test.go @@ -11,6 +11,8 @@ package security_test import ( + "crypto/ed25519" + "crypto/x509" "fmt" "io/ioutil" "net" @@ -57,6 +59,9 @@ func makeTenantCerts(t *testing.T, tenant uint64) (certsDir string, cleanup func )) require.NoError(t, security.CreateNodePair( certsDir, serverCAKeyPath, testKeySize, 500*time.Hour, false, []string{"127.0.0.1"})) + + // Also check that the tenant signing cert gets created. + require.NoError(t, security.CreateTenantSigningPair(certsDir, 500*time.Hour, false /* overwrite */, tenant)) return certsDir, cleanup } @@ -145,4 +150,16 @@ func testTenantCertificatesInner(t *testing.T, embedded bool) { b, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, fmt.Sprintf("hello, tenant %d", tenant), string(b)) + + // Verify that the tenant signing cert was set up correctly. 
+ signingCert, err := cm.GetTenantSigningCert() + require.NoError(t, err) + privateKey, err := security.PEMToPrivateKey(signingCert.KeyFileContents) + require.NoError(t, err) + ed25519PrivateKey, isEd25519 := privateKey.(ed25519.PrivateKey) + require.True(t, isEd25519) + payload := []byte{1, 2, 3} + signature := ed25519.Sign(ed25519PrivateKey, payload) + err = signingCert.ParsedCertificates[0].CheckSignature(x509.PureEd25519, payload, signature) + require.NoError(t, err) } diff --git a/pkg/security/certs_test.go b/pkg/security/certs_test.go index 2459ad68ca43..1d8dc3e03a1d 100644 --- a/pkg/security/certs_test.go +++ b/pkg/security/certs_test.go @@ -150,6 +150,8 @@ func TestGenerateTenantCerts(t *testing.T) { require.NoError(t, err) require.NoError(t, security.WriteTenantPair(certsDir, cp, false)) + require.NoError(t, security.CreateTenantSigningPair(certsDir, time.Hour, false /* overwrite */, 999)) + cl := security.NewCertificateLoader(certsDir) require.NoError(t, cl.Load()) infos := cl.Certificates() @@ -175,6 +177,11 @@ func TestGenerateTenantCerts(t *testing.T) { Filename: "client-tenant.999.crt", Name: "999", }, + { + FileUsage: security.TenantSigningPem, + Filename: "tenant-signing.999.crt", + Name: "999", + }, }, infos) } @@ -221,6 +228,8 @@ func TestGenerateNodeCerts(t *testing.T) { // ca.crt: CA certificate // node.crt: dual-purpose node certificate // client.root.crt: client certificate for the root user. +// client-tenant.10.crt: tenant client certificate for tenant 10. +// tenant-signing.10.crt: tenant signing certificate for tenant 10. func generateBaseCerts(certsDir string) error { { caKey := filepath.Join(certsDir, security.EmbeddedCAKey) @@ -248,6 +257,7 @@ func generateBaseCerts(certsDir string) error { } { + tenantID := uint64(10) caKey := filepath.Join(certsDir, security.EmbeddedTenantCAKey) if err := security.CreateTenantCAPair( certsDir, caKey, @@ -257,13 +267,16 @@ func generateBaseCerts(certsDir string) error { } tcp, err := security.CreateTenantPair(certsDir, caKey, - testKeySize, time.Hour*48, 10, []string{"127.0.0.1"}) + testKeySize, time.Hour*48, tenantID, []string{"127.0.0.1"}) if err != nil { return err } if err := security.WriteTenantPair(certsDir, tcp, true); err != nil { return err } + if err := security.CreateTenantSigningPair(certsDir, 96*time.Hour, true /* overwrite */, tenantID); err != nil { + return err + } } return nil diff --git a/pkg/security/password.go b/pkg/security/password.go index bfaca3ea469c..14a8fd666553 100644 --- a/pkg/security/password.go +++ b/pkg/security/password.go @@ -14,6 +14,7 @@ import ( "bytes" "context" "crypto/sha256" + "fmt" "regexp" "runtime" "sync" @@ -28,13 +29,34 @@ import ( "golang.org/x/crypto/bcrypt" ) -// BcryptCost is the cost to use when hashing passwords. It is exposed for -// testing. +// BcryptCost is the cost to use when hashing passwords. +// It is exposed for testing. // -// BcryptCost should increase along with computation power. -// For estimates, see: http://security.stackexchange.com/questions/17207/recommended-of-rounds-for-bcrypt -// For now, we use the library's default cost. -var BcryptCost = bcrypt.DefaultCost +// The default value of BcryptCost should increase along with +// computation power. 
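The assertions above boil down to an Ed25519 sign/verify round trip: the signing key written by `CreateTenantSigningPair` signs a payload, and the certificate's public key verifies it. The same loop without the certificate plumbing, standard library only:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// Generate an Ed25519 keypair, as CreateTenantSigningPair does.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign an arbitrary payload with the private key...
	payload := []byte{1, 2, 3}
	sig := ed25519.Sign(priv, payload)

	// ...and verify it with the public key; the test reaches the same check
	// through CheckSignature on the parsed certificate.
	fmt.Println(ed25519.Verify(pub, payload, sig)) // true
}
```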
+// +// For estimates, see: +// http://security.stackexchange.com/questions/17207/recommended-of-rounds-for-bcrypt +var BcryptCost = settings.RegisterIntSetting( + settings.TenantWritable, + BcryptCostSettingName, + fmt.Sprintf( + "the hashing cost to use when storing passwords supplied as cleartext by SQL clients "+ + "with the hashing method crdb-bcrypt (allowed range: %d-%d)", + bcrypt.MinCost, bcrypt.MaxCost), + // The default value 10 is equal to bcrypt.DefaultCost. + // It incurs a password check latency of ~60ms on AMD 3950X 3.7GHz. + // For reference, value 11 incurs ~110ms latency on the same hw, value 12 incurs ~390ms. + 10, + func(i int64) error { + if i < int64(bcrypt.MinCost) || i > int64(bcrypt.MaxCost) { + return bcrypt.InvalidCostError(int(i)) + } + return nil + }).WithPublic() + +// BcryptCostSettingName is the name of the cluster setting BcryptCost. +const BcryptCostSettingName = "server.user_login.password_hashes.default_cost.crdb_bcrypt" // ErrEmptyPassword indicates that an empty password was attempted to be set. var ErrEmptyPassword = errors.New("empty passwords are not permitted") @@ -74,14 +96,14 @@ func CompareHashAndPassword(ctx context.Context, hashedPassword []byte, password } // HashPassword takes a raw password and returns a bcrypt hashed password. -func HashPassword(ctx context.Context, password string) ([]byte, error) { +func HashPassword(ctx context.Context, sv *settings.Values, password string) ([]byte, error) { sem := getBcryptSem(ctx) alloc, err := sem.Acquire(ctx, 1) if err != nil { return nil, err } defer alloc.Release() - return bcrypt.GenerateFromPassword(appendEmptySha256(password), BcryptCost) + return bcrypt.GenerateFromPassword(appendEmptySha256(password), int(BcryptCost.Get(sv))) } // AutoDetectPasswordHashes is the cluster setting that configures whether @@ -168,7 +190,7 @@ var MinPasswordLength = settings.RegisterIntSetting( "Note that a value lower than 1 is ignored: passwords cannot be empty in any case.", 1, settings.NonNegativeInt, -) +).WithPublic() // bcryptSemOnce wraps a semaphore that limits the number of concurrent calls // to the bcrypt hash functions. 
This is needed to avoid the risk of a diff --git a/pkg/security/pem.go b/pkg/security/pem.go index 39702b6c748a..f61e49619644 100644 --- a/pkg/security/pem.go +++ b/pkg/security/pem.go @@ -13,6 +13,7 @@ package security import ( "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "crypto/x509" "encoding/pem" @@ -79,6 +80,12 @@ func PrivateKeyToPEM(key crypto.PrivateKey) (*pem.Block, error) { return nil, errors.Wrap(err, "error marshaling ECDSA key") } return &pem.Block{Type: "EC PRIVATE KEY", Bytes: bytes}, nil + case ed25519.PrivateKey: + bytes, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil, errors.Wrap(err, "error marshaling Ed25519 key") + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: bytes}, nil default: return nil, errors.Errorf("unknown key type: %v", k) } @@ -136,7 +143,7 @@ func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { } if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { switch key := key.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey: + case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: return key, nil default: return nil, errors.New("found unknown private key type in PKCS#8 wrapping") diff --git a/pkg/security/securitytest/test_certs/README.md b/pkg/security/securitytest/test_certs/README.md index 7e9c85296d0f..1b7a058e51b9 100644 --- a/pkg/security/securitytest/test_certs/README.md +++ b/pkg/security/securitytest/test_certs/README.md @@ -12,7 +12,14 @@ Contains the following files: * client.testuser.key: testing user private key * client.testuser2.crt: testing user 2 certificate * client.testuser2.key: testing user 2 private key -* +* ca-client-tenant.crt: tenant CA certificate +* ca-client-tenant.key: tenant CA private key +* client-tenant..crt: tenant client certificate +* client-tenant..key: tenant client private key +* tenant-signing..crt: tenant signing certificate +* tenant-signing..key: tenant signing private key + +The per-tenant files include IDs: 10, 11, and 20. 
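Returning to the password changes above: `BcryptCost` is now a cluster setting read at hash time, so the cost can be tuned within bcrypt's allowed range instead of being compiled in. A rough sketch of what the setting controls, using `golang.org/x/crypto/bcrypt` directly with a hard-coded cost in place of the settings machinery:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// 10 matches the setting's default (bcrypt.DefaultCost); each +1 roughly
	// doubles hashing time, which is why the setting is range-checked against
	// bcrypt.MinCost and bcrypt.MaxCost.
	const cost = 10

	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret-password"), cost)
	if err != nil {
		panic(err)
	}

	// The cost is embedded in the hash itself, so verification needs no setting.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret-password")); err != nil {
		panic(err)
	}
	storedCost, _ := bcrypt.Cost(hash)
	fmt.Println("stored cost:", storedCost) // 10
}
```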
For a human-readable version of the certificate, run: ```bash diff --git a/pkg/security/securitytest/test_certs/ca-client-tenant.crt b/pkg/security/securitytest/test_certs/ca-client-tenant.crt index b335269193e3..80516c312dc1 100644 --- a/pkg/security/securitytest/test_certs/ca-client-tenant.crt +++ b/pkg/security/securitytest/test_certs/ca-client-tenant.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDJTCCAg2gAwIBAgIQOfrWjjtXCj0OAGD2N505aDANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -MTEwMDUyMTU5MDBaFw0zMTEwMTQyMTU5MDBaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj -aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA0NfPF+ui5dGvNmmQm21FQTXxRgM7+Vs7AwIk4Z1t4LY7haEZdHDV -2vqh4/xjfKaJTBc2izIzZ1yvcupjRX3W1kTNjmN7R0cPlKZdjLmblNi6CRw/IpaX -D9yQvec8pDm7OaGNjPQvMSI5DxhljMXmzX0qxO6OFz8Y6BlrlF2J1FGVNkt91zzn -a8pimqV9VZz8+GxDlM+JUXktR9keoOvn3ltaLstsuePoAlkxI2eho8NvcFcyGk4U -rSgot/jsz8tbmTeMKKx+QCT91i4k04UtDuNB9ioTDP7gWd+bDyZD08fv3ZiGBq9H -8DGYN0vV+EZsSXSlX2vN3mgKjEB/e9WKgQIDAQABo0UwQzAOBgNVHQ8BAf8EBAMC -AuQwEgYDVR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQU1b1+UsN/PvoRENmqPBk1 -Yk483fIwDQYJKoZIhvcNAQELBQADggEBAE5KGV3T0DQHuYlo3rvLoh3Z4IfredoD -b/lt4rIY8fzCla+amYKdnRuptc5CluZ7MG/cTS+qhr69bUJcKI1p7Ya2HaBQ11ZL -1vNy0XVPSom3rZyT+p5JOcsF4iOmBfNybEo0Mtk1jEYVaGu8X6RgrKmBBOLBNOU/ -kn6YIqEqX2Ya2sNc3X9ddau+fpH2E7kA7LSG2WlBOK/yIHIn9c65++nm2DGeTQ+W -kP+l0uXHwKlEGknnhX3xTKyBOOssk04XSyTRzKGFzY8fmtb50RVx0fsiSAv6fXSF -OS8djwtt0mqYpAFOAHIqamSKjsUlyUX4+qqNjxvijCAWIqEk+wzdroQ= +MIIDJjCCAg6gAwIBAgIRAP3ot2EqZmVMs9ZW7K+ge8cwDQYJKoZIhvcNAQELBQAw +KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN +MjIwMTEwMTkwMTIwWhcNMzIwMTE5MTkwMTIwWjArMRIwEAYDVQQKEwlDb2Nrcm9h +Y2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMeGaYPQCNgFEeW30pFfUzuge7AX96mHjgcAwkwonBfYWFL4LduV +/bry0uGuXfr079sULQFSaB5BQUvWWjGSW9lPQ3oQAw0PXqFCj0euBaypMAQgTQcw +MQOis1OWOs+8gIAb17dXPxMC0DsRj/aEjt7WIEfQFpkHjdl9CjFfXn6FgQMDpp6/ +W1WEXG0AU8l4XRyrT3450VaPRudi/88muPdvSWPuRNxolepEgzpCQHJptaPBn0Tr +gZQHxfoY93vvoEJJjh6QC9JqgTpwS8Dmv4bKkkAsSpVVXR5tKW3wtqGGh2fTWmcl +cIcNEFudTJ2Nry5/mY9zB4Xiqx/IAgrJatMCAwEAAaNFMEMwDgYDVR0PAQH/BAQD +AgLkMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFAw9X1Nmta6HSkIVuZTb +D9czNUg6MA0GCSqGSIb3DQEBCwUAA4IBAQDEeaGEFc49osHz81rb6+j6K9GWqWuP +v9ktK+A3sBG8xbF763OFPF7yXWtSPxe9g3sqpAursQ2wPCm6R3j4gd2ekoT8aE86 +WZNcVJ0oLy/HWOjPtiKlbbh4rQIZNhV2f1UXkkfLANbjdZhtlD1ljuSC7IOtdRuy +g0y9Rxp9BChA1s5M1GQevW0qzUSiZbhDiQ2zjB5Uq0GIjhUrKgh4H7w9Ra2uozRO +v2u8GNXRBRsTIWYaHT80Kb9wZIqEsXQ3YlrjTO1WY/Blv0WtTCkeU3wwV6NzHmCr +isRyL6YpvKCbdvWXWd5Q7LV4zKHxi7yy9gkxvmuXbvL6asqdHeWcvvSX -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/ca-client-tenant.key b/pkg/security/securitytest/test_certs/ca-client-tenant.key index 2f98bdab29ab..6c017817d5d6 100644 --- a/pkg/security/securitytest/test_certs/ca-client-tenant.key +++ b/pkg/security/securitytest/test_certs/ca-client-tenant.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA0NfPF+ui5dGvNmmQm21FQTXxRgM7+Vs7AwIk4Z1t4LY7haEZ -dHDV2vqh4/xjfKaJTBc2izIzZ1yvcupjRX3W1kTNjmN7R0cPlKZdjLmblNi6CRw/ -IpaXD9yQvec8pDm7OaGNjPQvMSI5DxhljMXmzX0qxO6OFz8Y6BlrlF2J1FGVNkt9 -1zzna8pimqV9VZz8+GxDlM+JUXktR9keoOvn3ltaLstsuePoAlkxI2eho8NvcFcy -Gk4UrSgot/jsz8tbmTeMKKx+QCT91i4k04UtDuNB9ioTDP7gWd+bDyZD08fv3ZiG -Bq9H8DGYN0vV+EZsSXSlX2vN3mgKjEB/e9WKgQIDAQABAoIBAH0Q8E1D6M62brYo -K9hYbMPrmOyznSPsVX+stgeCrr2GhEi3PDY2KGu18mNEzxNj7A7sS9a6HnyEzfEE -mqSQAqtYkrz1qGr8DEGaC6XttVVDIM2547lD830lOmzvYKp8hDm0HL/Pr7893mvC 
-0XWnQlvG3GkI74DOYY0FRB6akaLajsPiYlss9ldlyJWS5v2zkS3eBxGa0/K+F6+n -yBt43/erJJqI/KC4gWmqtylAVWYKg4udrRLBlC+WJYODLOpFTjZPRNjETdFbgwXr -Vi3CwbUVthX+V3jUJxYykKOJvYNu2wKN6hMN+/V9AadumFouJXw751A9LvjIOBA5 -1U1VIQECgYEA1lzqzeYlw0kHUdkah2lY/IVGOixCt9RKejGMJTg/s7Ak+EuN6U6U -LGaiz3KlxrxsOtDzA7oNU2SXLuouLDeVRIXb5TVcbG2PkItDQWyZ85uWDQ4J8cOs -PeB7XoJdKgFyFmILRSyargntROSdM4VSmWWPUPw2t3fpcz+ZtYhHQbECgYEA+Whq -PdkT8rkp52YxyKudVrRRAmLAggFLdleNOAGm3YWNSKPZEwiMi0Dwv6hzVFlduyXY -HN2x7QLckVmhL7HCdsfQfm/o/8oOZxSD3d68hlKRRwQUU8c6aNl80m4S4ue1SBfs -U0/DqOYY97HIaFNuWZ9dZcoWjzjmINe2KoowudECgYEAhcnSRpW4xexjZCMTGqaW -gmyPyyjqrx8RKLiZ1nUH1Rc/y7QkKxP/CMM6JC1S5/mbtw3ITnAldwHWFdLU5IYA -ZJcDCkhGmem7a6RvYnrsjw1xFk7HyBuu+aOMfAzzJo9mbC/fBtpy4+KQMqpYrtos -Fk87pnjzkYiGrIZqYdFfX9ECgYAjmAQ5iWZwaH6TeKE4O028FwXMql1A8IBdhkcw -LAQtbybnrAKJ1dmA8iapo5e5/PsEf3UlQXpoEdV6pOj9DohrPT8DHGOLRcHicioT -mvsazGPRV8Ky5ibu2cMXoP/vBBR6Enf2t9Z7n0MG7odkZCvGfJjusvWfQQaOv1pP -HeOwUQKBgAX+ZhSIaFYcIvvz68E9wpNJ/L+t15XmJLKTkvKV+bG6DEOWqhuuPFVX -GL1LC7BEeJDfwFDS9y5IADb6stulBEpy0KMIgY/zyHt1me0q5PNk1xl/ktnMGLjo -4x9RZn/XiY1IsfbOvj7/TmkzH118+2RV074HNtDgM6avouh3bEqg +MIIEowIBAAKCAQEAx4Zpg9AI2AUR5bfSkV9TO6B7sBf3qYeOBwDCTCicF9hYUvgt +25X9uvLS4a5d+vTv2xQtAVJoHkFBS9ZaMZJb2U9DehADDQ9eoUKPR64FrKkwBCBN +BzAxA6KzU5Y6z7yAgBvXt1c/EwLQOxGP9oSO3tYgR9AWmQeN2X0KMV9efoWBAwOm +nr9bVYRcbQBTyXhdHKtPfjnRVo9G52L/zya4929JY+5E3GiV6kSDOkJAcmm1o8Gf +ROuBlAfF+hj3e++gQkmOHpAL0mqBOnBLwOa/hsqSQCxKlVVdHm0pbfC2oYaHZ9Na +ZyVwhw0QW51MnY2vLn+Zj3MHheKrH8gCCslq0wIDAQABAoIBAAaVBpIUoNYPhMGh +SM8G6AYFi08J21+6WxMcEUzV4iBfQLqr+UdPMWmjbRWI3QzUW71McxeiElE9MdxA +nAUaoPEQTleOg6tAoIyNV5CzyvghNLZOInxkOJm4GlZdlF8aBtszD/C6bhhAdYId +WDR3twbe5X24/aXau/E60MVEMifWTLn9Pto1XGapbyaf+2hGL9raji3DBP4JiM2d +Xdf7TOqrVIF7qmiADvFVnsSSWMr04VH45FojBdEZKHXbVoCLwC9qn8nptTlPFojd +ihnBUy9QXLr8R+YUgTGpQZmbiWapEqHYa3eS8vwsbB8i4YHrm504lvybdzBgi0x8 +E3OMeAECgYEA6TgW61JLCtOiuDmupDyThP8QEHHs/9k57b7PplVEimbVuIlUNs+u +c2isHRiM3rts+/CZUrP7I4Om5qMA1HatquBxL2Hawh1M2ztSNFJTEB2OTOQQ4Iz9 +oThi988xSBAIDQ94gpj0B1ZlgtQSVGj4imOVU4BAHawcDSBFjaIWsNMCgYEA2wPD +qZoa9kbu7OWCOsTUua4ERWeR4Tsy6QH0RakckcuJy/ZKBfpv1wiDyKAJhvpIPMvC +5xYaw2hb85YcmAWzi2Lu9jymO9oFv5QRMGGetE8Alm2/qThec2moPR80iYrOmydy +Ie4dNxasulcdAofPdL0n0lm+L7sFaYofPFPeHgECgYB2+zmeJqbISD581FjHy2vL +b0En0qeBw7YtF6rihh/oqBwjAFTpfbzXfjBIy8yamW45fn8KVW4rqS/N/J0gx8dE +JSs5bCfp3n7mXfZLYTClSR7fFX+Sv/tpc9Xx7U+MHzmsSBdIMXZWA/rX6w/K5p7e +I338UrLjMHpDLBKv9mCzJwKBgQDCW8nUhfStX2+CfX4fhzM8gCg8K1gzJ6TbUKek +9hlrbNQhU7SHL6L2khDZBuTNiuh2Q2D4UA56IO+Q8EL5yf12kdp8XIAtFyMIy26h +n9AGNSHRXR28H1D6XOY3L60g7jTBTbUkVTpJ++5XAx20dC9varmfG5MCqpZ3/WIQ +2GCCAQKBgHgUmqRxe2Hg4r5lrlVE1K71kT50GlQzuS/aegu087J3vFyhV3ySRJMH +hyi4zW3g2YKH0/piwp+x8jq5Sm2GZKYPyciYQe7fRrLSK56HnlD+OUakbKO4I19m +UJfNlyGITsqzAPe6Ax7ETLt6MIaAV1KG6PwG3xFJJ7wYQtNFgogO -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/ca.crt b/pkg/security/securitytest/test_certs/ca.crt index 8c4a5ec5c28f..33eeefe9e2a9 100644 --- a/pkg/security/securitytest/test_certs/ca.crt +++ b/pkg/security/securitytest/test_certs/ca.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDJjCCAg6gAwIBAgIRALbIchRMiLviwEldEDtibbswDQYJKoZIhvcNAQELBQAw -KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN -MjExMDA1MjE1ODU5WhcNMzExMDE0MjE1ODU5WjArMRIwEAYDVQQKEwlDb2Nrcm9h -Y2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAOF4MrfCn8vMJAMIk5MGe4Nsg24MsfpAOYMmcKaV8FI0s3qdZDDJ -VMt2VCjZXpvEa5AMPHiaEqWYWtXlSj42yY66KYbh23BnVfJM8sYN2qIPhAzRqnOq -UYmuPBgfm8fXj69uY+oV4N1SOyONuLzDDksPaLIdN8TMQJ0aTRvDAHG76G4IAIrh 
-Ncc2EUV1nX9NcRA6CHP2ebMKySmZoWMzYtO9a453kWm5guSNLHXBhF9vTTM4o6hK -qs9ZgBSjLBbRaFqCkQ7OV0nrLfKhtDaRooEri+lgn/QslhPmVkYExABKeO0ZNAJi -ZEWiXWjRinTtt/Z+ru93LfGCD18zskNk1iUCAwEAAaNFMEMwDgYDVR0PAQH/BAQD -AgLkMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFBAPRPhW0Jue+Fs8syxV -rD0g47GGMA0GCSqGSIb3DQEBCwUAA4IBAQBF/4Tb5xsrCkhlItS4zzIYQ+Vi94N0 -T1q0mvUVaTIJQCVfz0FlNc44xMZf3f+DotSxwPRLHOQRAPUX/Yy41PRxcRZqzFC7 -mOwayk38CAu4Sv9CXV2Dz4pho4rb46frblAcEOodHvsGqBr7bhnYktMGRWjPtKET -BZdXbLRQ/yp4NdFfuyYpR6nZaIvs3DomFzJZ40teFTq+vB3Ggvy6RzfkU7E2ocyo -0yBgqD47BGYuz6jsheyrkOcurm3sgp7TwFhZRrVAX3+/EG/TNJSCm7/nm1SjjZ1Q -wLwictxi4Y5NLCvlUndj9DFATkynJfVit7AcPAprp4rz2O/jGo/aX9ZU +MIIDJTCCAg2gAwIBAgIQVTqh0bKWaGc9mXyKQM1r3DANBgkqhkiG9w0BAQsFADAr +MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y +MjAxMTAxOTAxMThaFw0zMjAxMTkxOTAxMThaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj +aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAySkLp2+7YudQOZYTc/fmF0RHeLXgcDSY2Y+2wb2lZ616lSsgmXGP +aWVHLza9E2vsS2C9BaY2qrK/lUlxMOFXnT3GVnRPdbJVt6uPz0K9hzcKZHT+6WjC +0R1tfUBK6GNmlvsby9+U1WMkThR2f7KH5ARrv7Lihm062INZQzJljkYcFVEEEmoL ++eYT0y1+SrJfuYrQeIdVYSC+4IhAHzryVxDA14lzInBXgVxVuC1b6uGOry/f++s/ +pBFo0FRUOs4noT30gFkJ434oX/YCMIld/frnLcpwR/qkbZZA6mLwOrFeniJsz7kh +sP3u76Cz6lUHPwyHJzW7oa8PR4udYdvibQIDAQABo0UwQzAOBgNVHQ8BAf8EBAMC +AuQwEgYDVR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUizkuY+BH48avRNXEnr/N +Ei9cteowDQYJKoZIhvcNAQELBQADggEBAIu67a1RKpmdD/DTB3Qxg+uSgyeTpcIe +fUNwyym4h8C6Mtp0jjYqrHNH6h6aGhuDj9vTPBLdAtp/s5gH5EMydFvr+2LiuyOo +N5LAszEN+A+6TrCN3EpcsRR+YpF/fGbvgdTibFdnnqCfpaPxZd1+8Nse2bFm6xjj +0mRDSzNU+Ti6kAKvYtFSmFXSOMWtGImHJXR46CV54CF+rOhvRllKsjSgZcKBikrQ +fyJBU8TtjpDA6xVTCTMJg0dp+c9rvYET40lEvuGMOxRmgkOOKdbsrSv2KD5FPrIk +y2gzRhcB2HnC44lhPBX1AxBbKdH7dsKeAQFaBzdIpLBMLPW19XsJmaI= -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/ca.key b/pkg/security/securitytest/test_certs/ca.key index 99667470f06d..fc771f664b18 100644 --- a/pkg/security/securitytest/test_certs/ca.key +++ b/pkg/security/securitytest/test_certs/ca.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA4Xgyt8Kfy8wkAwiTkwZ7g2yDbgyx+kA5gyZwppXwUjSzep1k -MMlUy3ZUKNlem8RrkAw8eJoSpZha1eVKPjbJjrophuHbcGdV8kzyxg3aog+EDNGq -c6pRia48GB+bx9ePr25j6hXg3VI7I424vMMOSw9osh03xMxAnRpNG8MAcbvobggA -iuE1xzYRRXWdf01xEDoIc/Z5swrJKZmhYzNi071rjneRabmC5I0sdcGEX29NMzij -qEqqz1mAFKMsFtFoWoKRDs5XSest8qG0NpGigSuL6WCf9CyWE+ZWRgTEAEp47Rk0 -AmJkRaJdaNGKdO239n6u73ct8YIPXzOyQ2TWJQIDAQABAoIBAB7bPU2qxAW3DWKg -5NllNLgavK3t7NmLupM5Sodx067caYepTJHkq8cgjYn9nZYBvpq7Ifd7Vnv1KtFF -DPEQvGrdDVvZQxfR/JApebwjHz24k+DRPNuRfpEyGCVRBhfGZ3Khz+i0T87aNx+5 -gIPMygL5u+FMYFp9fYZCmDTpbgEibW6XJ0PiBgfqhTNRDnOufNLRh11A3nGlg6Wm -8cDdV6mDGs/fmTDj3CWXQjBX71wamfc+KYBbBI4uezM+VLjzXpCu8yg0rzOy0x7A -l4EKpRWVUd7yYtN8y8XIAiNDXkHhaSWBIT46rUxFPSIPHyIPocb7K9Wcx0a7V5kf -9WFC5o0CgYEA5mPhoJyxbO4Ru9BBmhyVeD9zfaIHmZVUYm137kIvyKeLpsrAHEXx -g6qRegOFbE7j8jkJG3MWzuqpPiRoxvOPmulOYvhYxmWfLQ7VwSEcht2BTUDHNqqE -pY4K1yEZI9I4538L/kCpdifcQQP8+cATLzyq6EOP/ZqRlWm0jO4Vv1MCgYEA+ohK -zKYUVDIYDp6N5qaJt988lwOXQgDFdqgxITlNHQRMXHhLgcx++8nbDOja23edBXe2 -i+6hS2Ap1r1ZiUPLDMbue7DonDZbR25Z+JMR48ZN7K0aCAvQjGgt35UbiL0/oqTC -mz6ZZ2VfUSHj6nCOuqiA1gie+pQ7Rw3MmLJg/acCgYEAkC9cSn8mDw0jLg5UsBVJ -efsD1VFtPjIVoI/v6b2kWAh7PsRO3KLtVkCOUPUZMEJjJ/mH+JTbZl2303tH05Mk -gYeXlhrkvC1nI/TP09FcIA9KV8W9pzmqUE4H/MDrEd8D2+VH5TQkmhyyHEJZtRf/ -KaXnM/rsh4OPrxl7dGOgWp8CgYAfnzRRwGSwe+ItdjXS9xJK8xJivUFKQ26pG3GY -D/N7xh8dTNG7AGTGwyOy7XEyYlafagzQfL4/FVQylpPGj5gBdNqFIj85+GJtEbCp -f2ayqUUMqoyul08rabrgjaF8EOcHUGR/09bpBhKR2wU5ghuB0+LvIQdv+WXCAivg 
-2iMD4QKBgQChfCn4drfZRJuMUElo6fKZRHMjRovQthQUeAGFrKauX4jgcRDdpzzT -fjCzowke82GEfOGE84KqvZjtH9SuYCd0JUJLDKfABcHB2ATnbqIDPVi44/Rwyyfq -IF2r9xpQehYvSjzRABMnF/6D+G8lV17JAqqCWhS9auiq3b78hhistA== +MIIEpAIBAAKCAQEAySkLp2+7YudQOZYTc/fmF0RHeLXgcDSY2Y+2wb2lZ616lSsg +mXGPaWVHLza9E2vsS2C9BaY2qrK/lUlxMOFXnT3GVnRPdbJVt6uPz0K9hzcKZHT+ +6WjC0R1tfUBK6GNmlvsby9+U1WMkThR2f7KH5ARrv7Lihm062INZQzJljkYcFVEE +EmoL+eYT0y1+SrJfuYrQeIdVYSC+4IhAHzryVxDA14lzInBXgVxVuC1b6uGOry/f +++s/pBFo0FRUOs4noT30gFkJ434oX/YCMIld/frnLcpwR/qkbZZA6mLwOrFeniJs +z7khsP3u76Cz6lUHPwyHJzW7oa8PR4udYdvibQIDAQABAoIBAQCVYeHBqXuiatxs +p0Iy8Hjx9kaNIaNWL/kCN3MkVM0sPOu3Mpu211oEjq1aJnAqqA6Fu4UjWNdn0+3p +0uw3vF/v6RwMv7ryUEjPaJwW8h0E+J7DEw7qDl3+JLhWNxRplsdsf3WY5KQGAuXH +BfMpyU6YyZ+qcBFAeoUknAYBnL9F8wFupYvlO4Gd9pKoli5gho26pw74A1NB21mh +dDyc9tmNpq8Nvz5qMdUrV1opK6a965qqxMG/wGVlUjg8iN4y1lQXyQv5/RyPOwZC +IaK3CbMgiQQjmQzpteeW2HlOyWZktg5AXs+JOusVKbURpXFSblONic1rMcLiD2Zn +/EMi2tYBAoGBAN3SrbmDg5aZe0nIgOGv8p9r+ONDxs6UP2wpr3DC8u0Sm8Hbnt83 +KoP7+BRCZ+iauTD8sRl1T+OOpi6na3dZCqXPXgUymMdNSPyYObbHC6Uqjx0j7ytA +wbimYqhi/FCgv6TjsG5ocOP3CkDoqyEmBSAfHGZWJfgCGdmba/bHpS/hAoGBAOgn +X+1a6YlWGzXxIGyYyZCz+Ffi6dU7r9antX4+Mjr7MVN1n6bTr15spS3IzwY7FXKO +lQM5m7t+TtB1cwAgVd7e9LlcY7XjEibZxI8d7rK7v+08boItsM2Uvv0LGawL1Y7F +l1dXbP9MeVFDT7H93Bs+KoVn9+9zo2N1suWflvQNAoGBAIwDyoJZn/q0YFy/QZKz +M6srRQt2oYuNicblPQcpFptL7qLb1JlCwgRTTFDFZb8two1IQyU1pjqVtRGnva60 +toLYtJkFSegrQVGnaG6VjyUvCuyy3OlpU54Q8B8nc+oUvUMAMUJPjEpoicFU24ft +7rhKyutRn1+/O7/eWbSIah0hAoGAQCFgZnkOulmG+se5ZUZvqAGPQPf2EGmEkY+S +m2UjCxgI8D019SfU8gihOJyYU+hObG7myxVG5+xkaUGImyhTkFWW1P2orb4kbYcK +vV5PaiBjTG29OUjV5nSIre47EUPTorUCsaX8/ilp+gDWKx0tiHkL1f56hzMyl28U +FEqZsKECgYA2FAfuoOXotKOSNYKfyPWu+LjK4ii6soDlFIgu6DOgspy9sjz9nXYV +6GCMl6mKLqPmC/chTmKaQBnONw905Hu4GOTVSQ8JlQBwmE1xis0r0Mzc0fl0viH3 +1p2fxauenvPwMTQtTbHxdCpXe7g7p2KanscOPxOsuvRvBCHSdFu9UA== -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.10.crt b/pkg/security/securitytest/test_certs/client-tenant.10.crt index 66d95593505d..22bfa6b2d7ec 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.10.crt +++ b/pkg/security/securitytest/test_certs/client-tenant.10.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDdDCCAlygAwIBAgIRAKikNg+m5fyrsJl1eGo7oEgwDQYJKoZIhvcNAQELBQAw +MIIDdDCCAlygAwIBAgIRANJrz/fSnAE8mJrPPyNpr7IwDQYJKoZIhvcNAQELBQAw KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN -MjExMDA1MjE1OTAwWhcNMjYxMDEwMjE1OTAwWjAzMRIwEAYDVQQKEwlDb2Nrcm9h +MjIwMTEwMTkwMTIxWhcNMjcwMTE1MTkwMTIxWjAzMRIwEAYDVQQKEwlDb2Nrcm9h Y2gxEDAOBgNVBAsTB1RlbmFudHMxCzAJBgNVBAMTAjEwMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAqIUXM+AYmAs5jF/195zfAH+tLHl2M6ZMhm4M8FRY -lTf3Z7hy83IFSP0+1wIUndMxMl6z+OpjufwGGTh/0X3EGYAvfybZwYbxVCW5NkH+ -ESbmBQOkXNhPCd74ABlRkdA+8JRP4DodF7ZI+GWfFx4q2ty37iOBktvlmNk6B4+r -HbkfNc1QbgJuAMih4ThdqBI69m/gJHradFuLIRknw20h393iJxmHaUUz5z0Tl7aU -Zc4y95+8NYwRj+V7JS97dnQWVrR6XbN9aemlrYB/csaiuwXw+aSTXLEQGwVXzdbw -Ib6SCBUxulbC39lOWELdyESEpI/QfAvI4tXsZ05szKOYbQIDAQABo4GKMIGHMA4G +AQEFAAOCAQ8AMIIBCgKCAQEAz1f7YcFm2Kkg4ZGGdX2HDKRoLgvwSjiqZ+LzBD86 +ZPu6lMrd8nr0LGdP/omKIbgqTdIUUbRdjay4oyVAjj8gi/VnqC12nL6lHkoP/Shd +Nhzz26o5s5mKry3P8G2Dw7KfrOJoGXFOGferEP598Dpl8kPR6AgU2ByxmnLQ89yV +SdSlxtWScXuzS+a6Rk7jgcjMLhsirkewonPlo7pllc5QW7n9AenpVl6TigC1uepa +UO+f/BsmecBwXV5RDp809GtdvxmTcETj3geLVJyaNFGUEhyiRpq5CoRbJcJyXnGA +a+JB+mibaNhGYNwFsStbRt2tWAKhzcs3N946iRfCif1Q1QIDAQABo4GKMIGHMA4G A1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHwYD -VR0jBBgwFoAU1b1+UsN/PvoRENmqPBk1Yk483fIwNQYDVR0RBC4wLIIJbG9jYWxo 
+VR0jBBgwFoAUDD1fU2a1rodKQhW5lNsP1zM1SDowNQYDVR0RBC4wLIIJbG9jYWxo b3N0ggcqLmxvY2FshwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEB -CwUAA4IBAQAf0VPhW4EPvNKgmhqMz3jGzQfi56mhZKzqYT0ZYdhUgJd97aWubu4+ -98icg+Nd/+T863gi7RFUgn113SbV0+aA9yz5S5gUmhxfzCuW+h/rZR2QQjqAKgw6 -JoVlrjXpLfbGM1bzFfBYAtbecuNrJ7yXdM1FHLUktleskXP/XULvkxGA1yQk0SSe -6HnCQnPXEAGz6cM8NDyS/LkmE0CJzCyFPkT5jvMT2WYqP6ffnr1wrACJKwWvl8X5 -3jhn7dHhM1GodZQyCAuDI/xVzS0vvuw/vYziBwyUzEAvEqZ2mcK9OX4klbCW0Zum -gm0NqyOuCiYyZfv1quNXBOpDxxKynub9 +CwUAA4IBAQABsIoGVpxCdZPFEq8lgBUaFombn/5x7SJ0RCn1RM4hw3x4xvOStiIS +KKHU7je+4eXR8m2WhT16NvF6nCDEJOCyDDGAPmzAYXRAyMR019C5TmCL+8SIKW8S +eR/gPSTYlQ6KZdIrSsQkPX0ytXdcifZHGQh5PIa9WQY999DheH7sSf0OQcstRG1W +ZX/kQzkBRf9wO6qY8Vhr05WWcFjug28/u2Ah7IbXxRxtAU6INu7bO65NOv2fRgxG +MvDDCtcjMEH+6wzlBz5aGj/DWqtrbxag43/HB4A+tNqQZRm9VRoEbH4ll/xskiuv +b1gpWAxDkoSK3eGmtVJkkLhWEhl2RDcR -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.10.key b/pkg/security/securitytest/test_certs/client-tenant.10.key index 0e41ca4c5935..617a69d7f628 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.10.key +++ b/pkg/security/securitytest/test_certs/client-tenant.10.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAqIUXM+AYmAs5jF/195zfAH+tLHl2M6ZMhm4M8FRYlTf3Z7hy -83IFSP0+1wIUndMxMl6z+OpjufwGGTh/0X3EGYAvfybZwYbxVCW5NkH+ESbmBQOk -XNhPCd74ABlRkdA+8JRP4DodF7ZI+GWfFx4q2ty37iOBktvlmNk6B4+rHbkfNc1Q -bgJuAMih4ThdqBI69m/gJHradFuLIRknw20h393iJxmHaUUz5z0Tl7aUZc4y95+8 -NYwRj+V7JS97dnQWVrR6XbN9aemlrYB/csaiuwXw+aSTXLEQGwVXzdbwIb6SCBUx -ulbC39lOWELdyESEpI/QfAvI4tXsZ05szKOYbQIDAQABAoIBAH03z6LbPEsV5CAb -WdAwvz9Gn63xXtRIGIq3EWQuuFfAa1SlqLyeF7FBg8UNpbYULHUqC37E429aBkoN -QmSul9z+88v+WUXE+GM3WKUpahjil35YU3QtwqUqCYtBROwdGfd3rNaHOADbkOMJ -16m838kawozv0yMFxFfL6aOerF57b9xTYu/1BGUlHS0J3T7u2icgdghuo3Y+l2ex -xJrQb9O7UDvaJqJQ/26uDvJ2aoEarHVrQUIvVAJv0TXlhVFJSopBKHvQu/xRH5tz -exLA7sjKUJc9uRrNuozFGOczZ0zuNrYlq1nORCaqRuj7IU/9k38iXnDiNIBf5CW2 -kv6+/p0CgYEA2hWOrX1TxDFmkb5WGOYdrj9rUyYXwZFfIpYHYc8lySKHuGy+cuEu -PMJ1NBJX23np23c/k8yalQLVYBZlbD4G545TorlWr1GrWCWiuwV6sKa2GO2Li++q -8vjx2XkY1qNjQ+DmOuSJGOzq44nW5sbaNeSLE5CDmQWqxpEgbZdJU2sCgYEAxdGJ -HcHxSz25iawHN/EwWlJKXOMxwscU4FPixNTXdWZdM8n6QRe73OsPxRCloP/XYV5N -H7vLtsddKiQ/8PT3K1jXmBMW6hEhTObxoXd4CzLgJCkP6rRBz3avIkTYA5H1Ea9n -8TJ5yegW0cjH4G0UQ4ckzXkWyUf2Xrw2KZ1rkYcCgYAW8NmXam2bX874WdtYtyw8 -HjO+BW3OUOp/CuUv5siExfsc7WODVii0/4ewuJVBYyu0iHDJrWY9cpTbjL+SiVvU -k+Y8Yd66ho7NsPfTUJo9ZuwU5zgculNzR7i5NcHlIj7Y7xmI7G13j8SNtvxCPnzJ -2oa+wh71w+DZDn8oyBdQjQKBgAcCP4twXa4+x54bHD5dK0tn8v6Lhi3Sg+jW0AgN -Snz8XX0CQP2ha6SOJ4bh6hxkNCtc/fqL2tHlCmX91s4gv8knYqwesQcmCDoDTLhd -1UEAUnS1h7zXGiXRnK7oJYcq2rX/sv0Wvr3qnTRdmsdW1gr2dNVa3vXOuOnuXTTo -QPFtAoGBAK6hjd9SKjuZ/4w7tRU577JqjA5xERJUYqNZbdJwrNw/OpMRHoLyEBMh -l1X30CHPOZW3GFU1yixG2ETWIOqGYChlci6cZLMrxYGrDd/1OoxAjhaV27U1IfIR -V/3AiaGU5/wl7NvgkC3Ai8OsSNt9msveKTrMmt1JdkCJceGLrsTy +MIIEpAIBAAKCAQEAz1f7YcFm2Kkg4ZGGdX2HDKRoLgvwSjiqZ+LzBD86ZPu6lMrd +8nr0LGdP/omKIbgqTdIUUbRdjay4oyVAjj8gi/VnqC12nL6lHkoP/ShdNhzz26o5 +s5mKry3P8G2Dw7KfrOJoGXFOGferEP598Dpl8kPR6AgU2ByxmnLQ89yVSdSlxtWS +cXuzS+a6Rk7jgcjMLhsirkewonPlo7pllc5QW7n9AenpVl6TigC1uepaUO+f/Bsm +ecBwXV5RDp809GtdvxmTcETj3geLVJyaNFGUEhyiRpq5CoRbJcJyXnGAa+JB+mib +aNhGYNwFsStbRt2tWAKhzcs3N946iRfCif1Q1QIDAQABAoIBAQCnEaadux+qvoSf +HQpxyyaxehvz1mzU8VnlgYn9RxE/Y1KRJ/G0u3vZ95kOaTbjOqjjsb3ro+CqEp1n +39FnjNglziSq748ed8NGZ7kAbLDGtIeN3VjHLZYA13IwsZ21Z02gGYJ11cVvyQ+P +DvDdS8Dvd9RAGZrqFBzLbW6OwJOOO5T1DNsCspjivbrAi6EiZ1rE4pxmFl34z6+o +n/BTxrKhtgTuWBvMkCxflY3fAfSSsW5i16qAweDT5XHGoWRi4plQKdJF5jrvSS/x 
+NH4Y7rSBCHnhNBhUVdwyQ5Sxhqnxa5YoqpmVf2ev//QRocyBc54ZKr/XSFwYUb04 +OADjXIbVAoGBAPcBALZzxI6scMXm5QASZhk68clak9Z58QEDodRCiqq7M74FICQ8 +mtpLPYDfkL5Lw1zGiShOJFTcy5Svdz+kIZ/lU4XuVsgOzIM/0w2f6t4byOHpFuY4 +RnXUtIt/+//PysZ/wi44rD6JKSYEr5W6qz0UR6AiGsLKna6jjqvD5SpHAoGBANbl +MqttdIklZ1YAh6Goz4q8X8xryarMfv49Z9IH4VNvS9dKVEbkvS3o36f/A94hxnAl +vehkTHHh1sbhp4J++QxeE1DIS0ow7vQJz0cfYoyaCLZMQbm3kCz1GfzaeiisoAnU +dSHn0qZrGpzEDyZ26Nh6zEFO4Zc3aG/O9UDlGp4DAoGADx70mDbGFaXg0XytEDAQ +KAM/wf/VhQ+5/UHnqkLYklMbe8p8iTtcj3iDr1wAVGX287sDsn/2IWvS2qtTNYYq +uMslLdHFZkHhqzdBCFh93FL/HTVTvYw8ZAI9ezy+hI6H71bq4EF/6eQjrLwks5nV +2ctgByGPWdVlicdheIppgQkCgYEAh0+WUh7/jAPDR4HZ5U7YL/FhGOSd/S/6nren +kbZoiRLBXHRvEJyjCi9h9PQ8SThXLPJ228eb4vFjPaOEyESPKNxrqSgVUEfzjjJH +E++NLB8pcTAfCoOtAsHqdS5UURwxQT9H6euA1k0GWsORDpU9FGJuDolOvtqiphRY +lV4tHmUCgYB+Zn1kMM7MlpoWzgbOP/f1jixSWrwHfb2X5uRhjpqvqAWOcQF8WtFk +87tUe2XJHwWjoF+eKaTSOpqHL0spS12oPyLznAgpmqg3TG1j97Luo54zbgPamlR0 +zDnAJIDiDcudkFCwdF+3+zwySE0kNIHKv0gSa9kIljXtNqGXHfjU/A== -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.11.crt b/pkg/security/securitytest/test_certs/client-tenant.11.crt index e9819be045f4..c5e90fbc9441 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.11.crt +++ b/pkg/security/securitytest/test_certs/client-tenant.11.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDdDCCAlygAwIBAgIRAJY3yxpOl+tnHJS+ceTUddwwDQYJKoZIhvcNAQELBQAw -KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN -MjExMDA1MjE1OTAwWhcNMjYxMDEwMjE1OTAwWjAzMRIwEAYDVQQKEwlDb2Nrcm9h -Y2gxEDAOBgNVBAsTB1RlbmFudHMxCzAJBgNVBAMTAjExMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAwIVdBbzWdZlw+Pbb7DoddKK7ZbuNwmHGUHItDS2D -BKGREoMY/4jJAUP23FhwByDvcs2Gu9asmVeRXld1mnVVP0kKTJ49KHpWepMGa94e -FN8Oz3UDJJ5t3+P3Ohwo6m1ZQPlom5jqUlivr6mdQbvSgB4M5Iix4OYY3+r6UW0k -U2cKet0d9TashZ+VNFOvED+UiWs4D9fjJDXfOoNtSUDZRng9FET/8QS9b++pl5X5 -OwYDwrlgusCXMDGp7LNopG1RBViFsO1h3OvihpOFLuXVEKNZn/1I+fUD/S7bmUnD -TtTWw3sX7Dia8vynFrct2eTXhO0shTX6tCLr+iBfVJtCHwIDAQABo4GKMIGHMA4G -A1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHwYD -VR0jBBgwFoAU1b1+UsN/PvoRENmqPBk1Yk483fIwNQYDVR0RBC4wLIIJbG9jYWxo -b3N0ggcqLmxvY2FshwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEB -CwUAA4IBAQAWpTbl96ttW0O66qX1xqMGWZYjhzvH3j6dAdFSyy2JGe8Satq506jZ -1sp98IqDlDylUgJJRFmZUzyfqzzAfpizctfyXk0cjGNGfMS7f6S/7SVF2lFjHIxy -LcSf0jMzaN6GODEHiqry7pDnpqyYycobamSvg8ZT8Ws2VrF717Xf6zVmW+KEG+ZI -UbXT4TITA12XAuweROAfnF0JyO+0uS9xjgln2PqGMYyAmUBmMQjIVnuH+oj8+Hy6 -q59QijaEgL+TLqKKDPFngAJ82A3RL1wMs1/6zffBu9OtjwMwx9ZfOEOnlEqCeZuA -+KZygxSEPpTVBdVzzRhyIJPg6dIssSNb +MIIDczCCAlugAwIBAgIQMrEKZlCQe8qOSmFZrdjGXzANBgkqhkiG9w0BAQsFADAr +MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y +MjAxMTAxOTAxMjFaFw0yNzAxMTUxOTAxMjFaMDMxEjAQBgNVBAoTCUNvY2tyb2Fj +aDEQMA4GA1UECxMHVGVuYW50czELMAkGA1UEAxMCMTEwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC1hIfulxvujaZPaFH+m7CZxfO0EpfdFDy07uyw0p3V +JXnK9Lrk/ynhX/hhg2wR50sjaZH5bSjaFXQY6UIOaBjncG8a344S0788gvsDoTm1 +n9+GglVF5hvCsvnWTiZgWBH/sZ+rLzRXIVuc8ItJL9nhI5Y0n4pwvhuK7UN1B1gm +Yx+SwJvsLpnXNHxj8SYva/X0bstDkaP5gZfIHt6kHZJXVjasAWTYNdeCnPWoyErF +0yCl4W6NSocDa9MPtmDNlI0WS7KMyCymqPs+DKoMzV+6XfrrSrhJnuQkLhmXDKij +1b/0PvsHSxy+nPcRTQd2d1PI/2zdIxZDWSGuL7iV38HxAgMBAAGjgYowgYcwDgYD +VR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAfBgNV +HSMEGDAWgBQMPV9TZrWuh0pCFbmU2w/XMzVIOjA1BgNVHREELjAsgglsb2NhbGhv +c3SCByoubG9jYWyHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL +BQADggEBADbOdSWk3gD9Xhdi9ESXzaDzJHNx1gljzBZFZW/qWuJEYJJ9H2GPGZpf 
+Y4Pkq79xsQ40tsSdqp+vpNqIkM+QvdbT+G+B0PA/Y9sgNzr5UBAaRQN5ISfvhxFv +/uiOrOuDxiiGg7xmPEY7PnW78iezzIOZHQSNuPev+t7UocrEMzj10znOA76Mh2wQ +pBnZP7LUEAOZ+Bjly6fzdNGv9sCnGF0ZlAfASUz/V1rEGY9GXRTgg6O4C69qBw+d +4KkI/9vkfcPosqobAcAUKA5mvkQkAfHP2W0n+bD5iBgJ8uEQt4qktjLQNhdAhpVE +Eb23bdtWe6a/7WmRfbMky7R1xFt2uqQ= -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.11.key b/pkg/security/securitytest/test_certs/client-tenant.11.key index 6bd818e7855e..c57f734ed3de 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.11.key +++ b/pkg/security/securitytest/test_certs/client-tenant.11.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwIVdBbzWdZlw+Pbb7DoddKK7ZbuNwmHGUHItDS2DBKGREoMY -/4jJAUP23FhwByDvcs2Gu9asmVeRXld1mnVVP0kKTJ49KHpWepMGa94eFN8Oz3UD -JJ5t3+P3Ohwo6m1ZQPlom5jqUlivr6mdQbvSgB4M5Iix4OYY3+r6UW0kU2cKet0d -9TashZ+VNFOvED+UiWs4D9fjJDXfOoNtSUDZRng9FET/8QS9b++pl5X5OwYDwrlg -usCXMDGp7LNopG1RBViFsO1h3OvihpOFLuXVEKNZn/1I+fUD/S7bmUnDTtTWw3sX -7Dia8vynFrct2eTXhO0shTX6tCLr+iBfVJtCHwIDAQABAoIBAFALItHv4XqjgUUB -3g4pW3XS8vcpM5dN75Yi4nhII+EZC6gWuZ4E0Q2lpE2Yt3s/Rk8FBK+0Ya6fqeD/ -m/XMMtVNGOsiH2tk8e+kkVbn0rn6w72deOEQvb4ImhGfA6P0MtfA2q1WtpXmKaVv -b+KUpMB/vfyGmO5YvJ7co5aXOjMyO1Ss7/7+pG3d7Pgnn3JFkEVmo72b4MJFusm4 -xTgEdkrvoedQOQeXc1Qp3z3y02AeaUf68+KCCQweIbyUhUDnScMlfwEVbBXnDLM4 -K6fPVR47gg6HheCEbhfv+UPlwKGqin3sVtNBMgUKoJ9cibWkjBW1b1Yw+9lgJyqw -kSl2lwECgYEA6r9yI+feQ2FZwZ1kagXWBMKNUa3aCXddR+kgo58Nl2vWut2hIKaq -Ebwr8bHtmUbdhQcLa9QX3ZVEjO1JFzISnJH+Aodw+yO9K8mNC+ieRvIEZMVGSvdB -o5EYKnGPGPSkfv50WbCA6NsLHZoZhnJgQlskA1mKH8eVSHDXF8XtmfcCgYEA0fNC -pXVhstUuRjqX3FKrrgn/CRFFkdaTK2QkkGJnvF918daQU7eibCgkCxpAVsGiBJxf -YGfhlr9jIw9w6kYxHZQhhQ3vbMe7/jIUw9W8FZVexA4vgoUVjPOyt0g1SVMEyQib -hKnUNR3WbPtdYXvT79yn5F0f4qS8iNPGFig8TxkCgYEAwkaa0fhtS9pfjAsUNWKL -EvloN3boAfmVa5QWfRN/DhgjaBdP6FPGYFMZEzgr6TRq1FNWMnEfJzUHvJIMcXPx -Tz7Lt5AcAtw7DOgLIRXtF83nsPZLDHPBEPtdwpKk2QRutuMjq66VoSDW/DKyeUBj -Rcdj8JnD5DUq6P7qJDScRVUCgYB3HSqMsQIS8fvlAE00RPr+LNGzFUeBLFom3hQY -jpwINai24mLT/HV8I3H/ko86KXWAZC+AhK9qVX8rXxFS+SvYejhiQru53b/8wfEi -AltJa4tudBXkWRkW9RRKA/+ncCBg5i9Fnwy0bg8/mYUrT5O4nFnsqCD09z23616R -J5a/0QKBgQDQOxX+bHuSLZf3BKiVVEIq73foDFfPKQ+po2WSJAiRZ54s9GWwyfmf -dZLT6Rl+IaQ8x26z8KB86GQgBrevpwK01tTrH+/TNE/FQCe1crAv/27KG/IE4rEK -YwkWBl3T54o+ypIxePhQgIlowIYbl86DzFeTEAv93ucnyVPyjiiYMQ== +MIIEpAIBAAKCAQEAtYSH7pcb7o2mT2hR/puwmcXztBKX3RQ8tO7ssNKd1SV5yvS6 +5P8p4V/4YYNsEedLI2mR+W0o2hV0GOlCDmgY53BvGt+OEtO/PIL7A6E5tZ/fhoJV +ReYbwrL51k4mYFgR/7Gfqy80VyFbnPCLSS/Z4SOWNJ+KcL4biu1DdQdYJmMfksCb +7C6Z1zR8Y/EmL2v19G7LQ5Gj+YGXyB7epB2SV1Y2rAFk2DXXgpz1qMhKxdMgpeFu +jUqHA2vTD7ZgzZSNFkuyjMgspqj7PgyqDM1ful3660q4SZ7kJC4Zlwyoo9W/9D77 +B0scvpz3EU0HdndTyP9s3SMWQ1khri+4ld/B8QIDAQABAoIBAGQiBT6oG1+AwqMB +gGH9DvH1Ulge/amWtVp2hxmQRkND1ikQ0lzrKfZLE+DvN9m0hy202jMHdcbAmPf5 +DViXMk3SJ2hitKRMLS27b69z7Dr2Q7+W/GV/6AaC5vHC0MbLLrqoCNXNR4ldPIWZ +6Kxp+j6JfB3xeNRy+wyrkE/pykX570JXazZIBiBTBZkbwcVv2BvWOh+1fmBST/To +WQuhni3zoU1pIve4nYwu0S/Tm7xASiRSkYep/vsdRHTxBJMlko8OHFpYkh5rGNDp +v2I1dIOqBPoAvhQgLFaIbpB7jGXAfCyOvr/bQDhvgJuThe8vbbOSC2dQN/+ZGmJS +KC+IKNUCgYEA8Cyhb2u5B5+ghtLd2BDpbpcLR1fKJR+hCh+C6XANAwX7WTL8c294 +AR9XAET5Ahg0KD0RmXFNMlNzCRrD7q21Z4c0YQckcQLBirecALaqSAkoxKHDDnv7 +BunT9e27PBmDafJNYGM+As1zROj1lgXznkpuaC14KSQc8Ufh0cLcUmsCgYEAwXp0 +IpG6asCLhrvT/NBGQiSeKuuP/nAqt8vT+eCC1anltwlkG/PxcSbkD5XIKdsEZzOm +TaeorgVNrA1dugRTUej1Z5GvV7EYLwEizbgTZVlQgw1oMdZu1MZ5D7jL3qm2bTnX +hcMAlrDG82pp4rPS9eOg6IXAMDdIEjXS5pth7BMCgYEAo+LQhedL6xfRwi5Bkx53 +Ky+GUrhlB8/9Y5r9Ca2cM2Pxj3xrJ5n4mUt5YoWuJO+/J3YEfGAD/UNUS/InoMaH 
+8o0gANWO2E65Ip8HpLUAnQci+oonP8r6EE2ehUIjcW83bSQaCJuvxNnMvkj4y9Zj +1q+ThyL/y5MI7NvQDAKbtOECgYB6NxXhOFifUl1QkJlKG24mHedjiUV+HfB+BV0z +fKRov1eCFYaNOb0MEtsBFUZJWjYf0rp8VynwMx1rT04jUNQo65UJBTfTluSF3JvV +gy+NV6vJ/NASmzeLZIvYaI0va9j2ihEgR5u5lJU38cJNF/ZsqIteFg7e5iy6hBFi +5kgmzQKBgQCvqra2081hOAgLiIth4wF0MaPFoP78508XHNt6Rw0NB7nJU4vcVdTK +XXxl9ZI+em8joFCJ1g5xrA80ACHxodcESePNrPB30jwJc3Iw6Jc3Ivn4iL55pkN9 +beOVq4tCdXJHp5FwRwIiiv/h16eZwlli4oJwj5zrO/7qCk4cXEtvUg== -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.20.crt b/pkg/security/securitytest/test_certs/client-tenant.20.crt index cf9dc36103ee..66f1b80bcfcf 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.20.crt +++ b/pkg/security/securitytest/test_certs/client-tenant.20.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDczCCAlugAwIBAgIQaqKi8Z/LkkmSogKvjuxjGzANBgkqhkiG9w0BAQsFADAr +MIIDczCCAlugAwIBAgIQQwcxVR4R88JtcUteJru52TANBgkqhkiG9w0BAQsFADAr MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -MTEwMDUyMTU5MDFaFw0yNjEwMTAyMTU5MDFaMDMxEjAQBgNVBAoTCUNvY2tyb2Fj +MjAxMTAxOTAxMjFaFw0yNzAxMTUxOTAxMjFaMDMxEjAQBgNVBAoTCUNvY2tyb2Fj aDEQMA4GA1UECxMHVGVuYW50czELMAkGA1UEAxMCMjAwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDJoB8NQAU9zdrjqwbqMZKWEO3MlUQvtpaIpZiG9itg -gdlA5SRjMAYA4gyuxafOT5pd66+qsSiM/3W/ri/84kbXBrosDshMHhyNmllJxQRp -QwtRXZXywtfN5sfqD61UJGxGKoez/OCL8GKwOZjlDRZzZYSOfUaRtQzHfuHP3UCb -VGNjKxCmIUqSJPYuvGu2OI6n+/sR0p57DbmSJL2zmOAQ163jhL13cYyo+XxGuGJL -2widguOQo7f6ICzof90FkWKO19Go00bwYAWypqfRtjsc47J0AD9H1Ums5DVxttbi -buME/Oy0SHXihMxJ20FhE7izjeYpkwM0T8OuOUGGD7+RAgMBAAGjgYowgYcwDgYD +AQUAA4IBDwAwggEKAoIBAQDGVVmuxLLyjdTpKfrWfdm038n329OYn1GFxfeLGeRc +18rN6XczYkI+yAJyH0mweSMjq7x5X85J7CC6IOQOLcrAwBs/oNYHU+xK0uSCgOoU +G9PXu5Gz/Q6VdBKqXXH5Uy3XlDAz8eE1x9RePMdPHJ7Qoq4UJjbRx0QX0LaZ8ZCV +epDEJoRfhDTZYyEKkS4xjErKUzMlydRw4IYOycvbz5PAZ611WG9L+BZCa5az1xBL +FEUyMfMeAuMvAkmzD9IPfx7/p797l8uW1p2zCbQ6nnqDIiDQk2yUye6jmB6qwVBj +UYUxHeopGCtHTWPqSDga8ylg+s8g0dtuDsCm7+GYUJ61AgMBAAGjgYowgYcwDgYD VR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAfBgNV -HSMEGDAWgBTVvX5Sw38++hEQ2ao8GTViTjzd8jA1BgNVHREELjAsgglsb2NhbGhv +HSMEGDAWgBQMPV9TZrWuh0pCFbmU2w/XMzVIOjA1BgNVHREELjAsgglsb2NhbGhv c3SCByoubG9jYWyHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL -BQADggEBAD9ammC9Y7FeS656mG0K52P1za+CuOHgn/riUMcMtyN69Q2N5Tr5Vm8c -OzKePeemuf6bWewm5dlu1f/J4oYl6qqE/kEvrkjBefDa/QlCRDD2IVqq6U+hfud7 -cd/uQcmyiX8N6AakWXn4Z1UaAWH8kdoqFtdgmSYaJlBiI2gmlhtUbpeEpd0ee5nH -8M/OzeoRDaP9F1TeZYwjkDCEX+WmdwWYnTrEN04B65doUxwFdEUR7kukxbQGLmtK -6FJW71DYwtz+owPKanBn8NWOeeEKwOqx6zLIR8c2/rAWQbP2WjsW4pqkL6eECdPL -bZ3nxHiuyUB9LiEw36slaP8sk5tSOQE= +BQADggEBAHQPeU7HlsBiWZiIKQstSCcoevqeHtyEtA9hrRIef9EcZHLOKOeyYVpf +C9lLigYMrhAlYt6Q387sldzBvKBuC961jGhG7FvAFdYLHSon3aHnZmq1NIYesJmw +YdUt7OPJtpzW6tpBhmqeDZBXKrN9BYxcrUFBJzOpDPB5zBnhdtMn5krAjJzrQJ5V +29AACpttr+fWzFZchPgDU2jxbATHUZTUCbaf8KBSBQOgbwhrEoAXKAsEpTdBD0f/ +QN+Miof/WEOT869KOmFtp+gFlfTJUidi3pg5y77FiXwasUYwgzvmoHGfAzHHztzI +rzPaKSUAFwZBduzsUghQChXoT1vLvKE= -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client-tenant.20.key b/pkg/security/securitytest/test_certs/client-tenant.20.key index 6591001f9ee3..23bfda906ae3 100644 --- a/pkg/security/securitytest/test_certs/client-tenant.20.key +++ b/pkg/security/securitytest/test_certs/client-tenant.20.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAyaAfDUAFPc3a46sG6jGSlhDtzJVEL7aWiKWYhvYrYIHZQOUk -YzAGAOIMrsWnzk+aXeuvqrEojP91v64v/OJG1wa6LA7ITB4cjZpZScUEaUMLUV2V 
-8sLXzebH6g+tVCRsRiqHs/zgi/BisDmY5Q0Wc2WEjn1GkbUMx37hz91Am1RjYysQ -piFKkiT2LrxrtjiOp/v7EdKeew25kiS9s5jgENet44S9d3GMqPl8RrhiS9sInYLj -kKO3+iAs6H/dBZFijtfRqNNG8GAFsqan0bY7HOOydAA/R9VJrOQ1cbbW4m7jBPzs -tEh14oTMSdtBYRO4s43mKZMDNE/DrjlBhg+/kQIDAQABAoIBAQCFbate72EIl3Ql -egbncMra+HUGEDnBgYl5AG1B0yK9iPCm230LDrrKVs2h0zfLdi5nvRLRWM/HDz9z -C+jB51SN8+u8QEQY60xmASDZisHyjuWsgMw8jOiXYAQoRxujNT9Z58dKOyy2LuyA -Rn2CBvk2gJYKXakXv8KnOrZhAsBAiZjnXQ99JKbfuauVfQlrMIdM1TGDjnH/X/Km -JopPxlGY53mt2kDFcUTb8Xjo/gY4s2MDhSAr8wm4OsJVMKsz1Two7Gn9l9O0n0j4 -mFvmvZDRGXb9XLkDLr/V6Q7iQBi+pt+V1rsJ3fIxn5Wn1J/Uo6PRwEA6/2+16eRQ -q5m7clthAoGBAO9pnadYEpGD7iU4oWdpHMIYMTj1KyQrls5Kz7rc8/kiq+XRP8vB -5a66mofPirhwpwuQbwiKnzk9g+Ghfejzw02dsefDFZutvaYDezt8PqJUNkWHjuJP -LtknnM61CdYNwzbbXd579oX9vQDhAMAZStB1AclldjeBjZO7/R2qyo3VAoGBANeY -SbbUafRyPO9+vM3smYwYoB14LoKxfCwdALG4ws8QfqnsolU+BDic73Ksnap6SHj3 -iy+yf8HUxfm/5pkVGJv7o8P9gZhwmUAnGsZO3wC9ABAOtzCbb6gPpQl6fSx4iyWx -5qNmuKTHSrh9LkcwvmjVENEiIfNSLlLH151gBXzNAoGARRKXpZxHXyxNHPgFuSwe -W/FW4jP5ymm5DLOQ0zzENvDF33xLh6HdflfgFabPQv9dcnytTC2SWpVg40wQY5FQ -m6gPl+o05cFuoeMHPtZNy8+4CL9fffOSdJx1JT4eJF+8HquTnYjuSyI10XcZHFMv -vozDgzN+s+zwx9rI4DJSVjECgYB397HtHr3m3Dq0Oh3tqJ64qmtOerXG2+Rw16Kv -QlSEzHTvMVIThPLsXceQVCVX27vANF8rJ3SX80JG+1XiES8kfR5fzQz0OpPMcvYf -K3xD+Oe0B7xAO/4h0rLPV22X9WvYieoBOwO8DXmyt1WtH56UzxsQhKHHuA822SVN -2US0cQKBgD8TWsYemznhyIlvZm7FC7te6ZVwgYYdavkg+BQf067Xprp7kyYA0Fnv -3XhLlyM5Cf0ZF6jiKoPjwXNBa+f8weZLGmU4fz5D25d9iAfm8S0/OXHzeiBoxQI/ -XeOAjFCyeqvNIbGNncpprWAHblUQyVRHSWebPLSWG8l/md9QipbR +MIIEowIBAAKCAQEAxlVZrsSy8o3U6Sn61n3ZtN/J99vTmJ9RhcX3ixnkXNfKzel3 +M2JCPsgCch9JsHkjI6u8eV/OSewguiDkDi3KwMAbP6DWB1PsStLkgoDqFBvT17uR +s/0OlXQSql1x+VMt15QwM/HhNcfUXjzHTxye0KKuFCY20cdEF9C2mfGQlXqQxCaE +X4Q02WMhCpEuMYxKylMzJcnUcOCGDsnL28+TwGetdVhvS/gWQmuWs9cQSxRFMjHz +HgLjLwJJsw/SD38e/6e/e5fLltadswm0Op56gyIg0JNslMnuo5geqsFQY1GFMR3q +KRgrR01j6kg4GvMpYPrPINHbbg7Apu/hmFCetQIDAQABAoIBAHooq9p3kPjQ3yjW +EIf4cBV2GYIuxf+lcaMBslzdD8kXqPR3LlJZ3Q+qRcdg+hRWKIyaBLaFihwB8o5y +H8WT8uQR7zabq/hLeqkDiHfRS1wjX7Hq9+1ymn73RV/luoOk9gFoZuA3xU1IzcdV +jDVwHWnIKYyDlRRUtd4tUas0HKfUBmRNJrH8YCKOZMCXE+jKi483UtQQ/ASakRVn +K+nZe/aisZVOPO/pzWgZMUoakq5mMUbCu4cJ7JM3NUCnriDcJUSsDnhdZ3cN8R4y +f+3RwwbIoj5onKrEZZCHBAFLiPcm8OJ8ZSbZZYby8TrIqFvevBPCESQsGK7AnqwA +Zch0ysECgYEA1zyxj9XLMkko0Jo0xaaPlh3xxoeuqq2kYSgwxmgljsVC9ESbOZxr +8rrQsVjfvY+RtvRr299Mwlwy8SDAEOX7bco842AANkdUqTNOMJf3BaStUWKwOfXR +C3+JzQnYzG9Ig3klTqNIHr25wbdqr1eLJoriHPzJN7MyP79pm90uUH0CgYEA6+Ue +NBPLFXgf/JrAdMVk0DYy3Hss+8k0kuVnYXRkFNXPeSYv6/c4PSOjnRlxFYN6vo+i +yiDH42APSGM/ihxFSVJuDIUMFhm0xA7fiSnd7b5UfC2MkoC6GYLToHuOlasaZWsF +r9wfmatgXSympMDgEJ7h4gIo5u/O/HEp8oTX1JkCgYBp0FnOx6FUwGjDXPxSqxbu +CxygqHWzTRiB9zs7X1oPfWT0J4JUaVUciMEuXu3oCFvvoOwhtP7Mkn0s1Bf4dsgL +6p/SfJC+HoU9hY6MDzmO2a2nVCgk5nd1+qZpWczufEse74DqzxUWn9lhpeVZ/GTZ +du/ApnnZ20v50QV/bdZmFQKBgQC5gM60o1ATzQhSbBumeEgkGEr86XxhcEOAtRgM +IixF2jGykp4i0KGQKsOSWhx8j41p56hbjVXDb5n1Ed84q6ys0T6rZ8Eua/6kIxIU +WjEksYTctjESUFqIj0H+tMtW1VwHnxa0ycSr4oIAI4nUi7xoNZlqUsp5eOHr0M3s +4hycGQKBgAo75A02xiRf0fvUxisVXvk6avQ4ToN6MDRppgcLoMFd9XRgLHhTZ7Bc +K2KVFoKxOd9Y9+7lG6AzPUz1QF3YvaOIEMPKR4Im1VT+EFrFaSn4vdKzMQU8K1RX +m8w+SbNa/2moEvnFZDoTHec6NbezmSRWtIVbogONwHjgJh39J5zG -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client.root.crt b/pkg/security/securitytest/test_certs/client.root.crt index 187adb53fc51..f704030710e0 100644 --- a/pkg/security/securitytest/test_certs/client.root.crt +++ b/pkg/security/securitytest/test_certs/client.root.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- 
-MIIDIDCCAgigAwIBAgIQPFwr7pc+R6Ive1Xq3w/uBzANBgkqhkiG9w0BAQsFADAr +MIIDIDCCAgigAwIBAgIQcgkmM8M7za0dPIlRWKOCMDANBgkqhkiG9w0BAQsFADAr MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -MTEwMDUyMTU4NTlaFw0yNjEwMTAyMTU4NTlaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj +MjAxMTAxOTAxMTlaFw0yNzAxMTUxOTAxMTlaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj aDENMAsGA1UEAxMEcm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -ANAHbBauGD9nnrW4KBq8BBDAeZ6SfRQeZZnGkcNDlbA/JJPHxerNkNoqI4IJNTWx -im96c8d1NMa4szwgJVJ/acu2j2o9QW/bpcvtk3KjqfuwmGLOd4mrknTqo9bZRL8g -4gcRMZz7/mwwpiCBaDe3FG75QAGdyo5GFrFKTRMFzbbrLWcO5CkTgfetfadnSFd1 -YkvpAvo3gG8Kb3lGAT0ieiQSxR04VMXYKUU5LUQQidLX4vPywcoTp7JKbAzSrD51 -iTLgMX7KQrmH86C55Q6FLTc6EXLhnWetQ5zN6VeMP3T/lBesweGS36LTlaRDAeOr -qKI5KzA5r8luxrtpckxZnmMCAwEAAaNIMEYwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud -JQQMMAoGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFBAPRPhW0Jue+Fs8syxVrD0g47GG -MA0GCSqGSIb3DQEBCwUAA4IBAQCjm0UPV5lDVUOEg0geI5gYOfPa0MFswfO06ANI -wVFndfghQ6g55MsEVJpaV6+HliFyGIgnpmbWjEcuUaE/nWomGH62ZOcrA8i43FhP -h1EDKBiM1xfbYF7v8aDUNfDmB/1zgBKxrYZ/H3yTsQrnupy3p26z2tlJuPAxO4KF -XALpPrGwvf7/IpMCt9pG3vxvM0MC73CQcbloXXRyvdkQIY4O2T4TjmKrwkfSnqnA -+E5rhgqBPC8Eoy6bBkB48AiFU6ot6e1kAp9jCu5q3+uaOF0teI2h7W27Rtfouct/ -8h2EzGEkIDv1xsUoSm0YvLpGQlKov8t37vC8Ar8Pe6BDoHwe +AKNJzS0UydhoR9W74/Ikkw/oO4eS2FIJT99YHhHriUQJ8L/SZCtp48sVZmPlNzU6 +qt9cJTgx8tCJ6vu97fRjpyLT4/78objzt4jb0o3+xk+zZ9yaLQlQdI5Ff/QVuNcU +Jc7CFCbcHgepyHovY9kkU2Fl1V2+r9IzafpiicoHHlM0OHmTjmiF3KfY3OdUsANS +USr7f+c5yKy7/6kd1RzeqqRGS+bNpgAb1LP6EQGCzqrjG7nojvWSMq+TZbxGtPvw +iYPKQS52Sil4upJwEsNywcrxGa63Aqo6JLWdYqxw1zCAB2SQmMUOW7th1zvOjyNj +P95LfrgQHpzqDYmvDDFXoekCAwEAAaNIMEYwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFIs5LmPgR+PGr0TVxJ6/zRIvXLXq +MA0GCSqGSIb3DQEBCwUAA4IBAQAVtCgQR5MqTcCchlanTM+FDGDl3TYKCsQQvKDa +c7qlUu1Hg3FTI0Ahw2D8X4aHd7kcujTslh8P0pDe1vi/mP3rS27bpx/d/0LEI3Vk +miavTUhixkl3Tw9Ovd1waCNMiysCuHV1V/bvKhn406qNO9hYFjK1saUhfPa+rZzv +HuuVyK3+OSgU23Pc2ifQXg3XDAabHldreCHt+x4YEAlwVqeurGClyrMiqqvRRsdi +6kvis/xYoZesT0nMmUi9fmjw/Ot6gZy/YMKJzQ1qqxUd0L1yW1h5uhDJE1JYXREb +dV34oUpjHI1y9K40bMHeQ6lwzWwfydIGDliSVmlXYuiuXPwr -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client.root.key b/pkg/security/securitytest/test_certs/client.root.key index a2308de4d7c9..77d6f803b5cb 100644 --- a/pkg/security/securitytest/test_certs/client.root.key +++ b/pkg/security/securitytest/test_certs/client.root.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA0AdsFq4YP2eetbgoGrwEEMB5npJ9FB5lmcaRw0OVsD8kk8fF -6s2Q2iojggk1NbGKb3pzx3U0xrizPCAlUn9py7aPaj1Bb9uly+2TcqOp+7CYYs53 -iauSdOqj1tlEvyDiBxExnPv+bDCmIIFoN7cUbvlAAZ3KjkYWsUpNEwXNtustZw7k -KROB9619p2dIV3ViS+kC+jeAbwpveUYBPSJ6JBLFHThUxdgpRTktRBCJ0tfi8/LB -yhOnskpsDNKsPnWJMuAxfspCuYfzoLnlDoUtNzoRcuGdZ61DnM3pV4w/dP+UF6zB -4ZLfotOVpEMB46uoojkrMDmvyW7Gu2lyTFmeYwIDAQABAoIBAHBeEfyOPXrCNfBi -cQc/19JUpV0KM5XSdX8Cy2l18D0rXk6U8ktKjpvJdzSeVKeUJvrgYfmzTbapvecU -VhShzjvDeOQiAl6xfnUQ9zxNO1xAn49/hwJOMuYEeEttQL0cvfqjRvFNDLjT7mhQ -fkLaGad36yL0YJ1JLqjFQayszdmnZJ10QzuSdkWpJEvUZL/kpr48OOmPAoqr34La -gBoLKQKJxtqMrkK6qiDeuJkDZ50HtQvL+RjD/xBa/Hi0gncf6N9XO3E2S0TDA9cv -VnXnwZntTpCzzCpOuBLkP7w8Rp3zTmdlKv6fSQHDffPt1fFxM5fc9HZyypKlYlfL -VE56gEECgYEA0p00K8fFrPo7pcF5IFVG0QTR6S7OpFkrM/26Pt3zo3Uh0NSlxn1G -pp6zEW4T4R9a8FQueYrZMjRdq0vJTRroUX1AiYXpnp5eC3S+KCSMJjghNjS9u+Qq -xwKOjxKklP0OkIuRCzZ0eZWtjkoP/0rvzzNqfw7PpDjR1NB1ZgYa8R0CgYEA/Nub -4wTfFnqV0FdgEJrYm43/J95xn6y03juLvOaX74Tco0ICcBaibRqDYQe2MHquZaso -Cx6tln6arq1cBVMW90Q7FlpKIfjtVJKXftuFusqwqM8pKw8H0ifXMrGymA4dPXBQ -HHZl6GMHIyBCyl5dvmhhtCXCNLeMa7gPNZGFNX8CgYAlVHfKyr3eyvuHhaNSd8r1 
-gUUzb3hw6N0feflo6y4j6r7zpL09UTcUkUxr6nbbkBMuRb13DYUxwNZZZjVn1T0Y -NHsb5QBjfO5uWWLa+p9cptY8JVz70JVEcM+TzFofjjrmo9YxhpVed4krjzUGquGW -eCaIHrfvA6MtdUVZr/tWgQKBgQCJY2WzFx2D4D1wg1kuDISvGJtP9Vx1buija2VE -0WIygVVE24PplVLRxzslSIRkmPQTR8ktKZEWGVpAmO9L8VakKY2VWp+3D8McVb48 -7lgeIy2Tu2PaAoO0W3NhFw478kdO76yHmolKmCvzmP7RaC61W4/iyzVXYXfJu6t/ -5r6jTwKBgGfFt6/4IHpabmyj7muO0V5sRLLa/U89H7wMf6kAih0ohjlFtykcxRHK -tfMp359JItdTI6EZYuKe4EKpNWqQ7Zr9Y5X2P+K7cAWZugy4NAbrIITNtfKT2H8e -Bv8QL1ZVVH3HgVo9R3c3wY+fgfx/VzY6vnFPSs6VEEunBNzWszVL +MIIEogIBAAKCAQEAo0nNLRTJ2GhH1bvj8iSTD+g7h5LYUglP31geEeuJRAnwv9Jk +K2njyxVmY+U3NTqq31wlODHy0Inq+73t9GOnItPj/vyhuPO3iNvSjf7GT7Nn3Jot +CVB0jkV/9BW41xQlzsIUJtweB6nIei9j2SRTYWXVXb6v0jNp+mKJygceUzQ4eZOO +aIXcp9jc51SwA1JRKvt/5znIrLv/qR3VHN6qpEZL5s2mABvUs/oRAYLOquMbueiO +9ZIyr5NlvEa0+/CJg8pBLnZKKXi6knASw3LByvEZrrcCqjoktZ1irHDXMIAHZJCY +xQ5bu2HXO86PI2M/3kt+uBAenOoNia8MMVeh6QIDAQABAoIBADmk9CnWDOu45KMv +kWkKQGB9O4bA8F0FrIzMLtFktTCv0a3mODabSy+Gfn8FjFfePjRb80fDWlUEW1BD +3J1KENbatsJtrSn93+0QrWQzbQ715tSaGQwQuxT+tA0XHgnPswkqurJ9Qpyx83Qv +BrDBgi4AJTLS/n7WZ7Nc1gfcO3hjhp2JBK6EECAB0JrNNXsbJIzJ5w/gqoplCRlA +floHgZS2PNtR3V72Vb23QLR6D65S46fpnIbzxektIOvj9UqoAzSpc4iOiyjbUqOS +XgBSOpIYBcAx6cKZ1HKV8FJSmkFLcfav/na34deTiqSK/vAoBxV3Rrg2fHYxxCGM +3ytuN4ECgYEA0zrOD+wkVsFMHgaI7hO6+z+qP29cBOUR2CDdYwaPH8jbbNwtGD0J +sgwWh9gEp4+n1cQ2AZTD7MOzlG+qwCu+DqO+XPZAzph6SynOCiRgNQJfeEqQtwTV +aXJKD1jhlHfbmyisRgcv8r2VmNheY+AiAYlgdxwO5J3Iz3ypmhh/wVcCgYEAxeW5 +bkq2hHBxTDGM57sA2vcyOiniE1Nan/A+xKYAoY9heISfN+NtnPIaFOTb8CGTmCn0 +XK+V1QDojs8199aZt9BTrdiVkwfkox3d2xeiXO9P3JvVGdkMqiE4B3Y6JGd+rVxy +E0l7tD1LsCLBkaVCDG8jXuVUj4swLTIC9YvFbr8CgYAD0OwoHXwKlTNq13Nh2bln +EJ9ixgBDll/cJ7vYLiYnzNkp/lBSP8gND2rYyW5MGKxPkFvpa2aewGpeJCZRkni8 +ivjFdS12jgqnkPnH9SBH1OMkqTQ0GkJAxW/RFyn8JK4y/2kdWsPi/snVGRObelEi +9fhoLnmWZ8NY/EeUIR0twQKBgAqjXcN7CrK15LFG3J88Y0BiF3Ye+EM3sOB2Jrml +ftUwgvnaj0CO3j6YmSRUZSpUc72zS6qL2c8YfGfo5arMA3lpHoZy5R+BRh4qpdl4 +PMcoKi/exKbeDxs6K+vixB9e3OVu2ccFpTu8K8xtIeC1dIZ8lvcr9s68mbtkO9p9 +SAC7AoGAZU+UvgKzN7Ln8RhvKl9IwBiPOYUJtAY+YCpLW0kbW9peDZOEhJcyFa54 +HwetoUZOig0162y8rb6//VCyDBSmsBb9yv5hilZG00wba5BW0SjlR1xk+j9V9ZYH +kVJOMLxqGQGtHgJjuzYJx7/SG2L1wkFcrcZIezzsrfxwmAOh7DE= -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client.testuser.crt b/pkg/security/securitytest/test_certs/client.testuser.crt index cd383e440252..1445eea84a8a 100644 --- a/pkg/security/securitytest/test_certs/client.testuser.crt +++ b/pkg/security/securitytest/test_certs/client.testuser.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDJDCCAgygAwIBAgIQNxDPPxP/FIJ/fRwl0Hk0rDANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -MTEwMDUyMTU5MDBaFw0yNjEwMTAyMTU5MDBaMCcxEjAQBgNVBAoTCUNvY2tyb2Fj -aDERMA8GA1UEAxMIdGVzdHVzZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDZeJ01BzMVd2hgJarcozyTGhI8Jkm0Qqnp57VUOLbSSmS7Tc2rIYAZpQpi -iE3TlmeYZEYhoyhxppGwdhtw7wTKbyMDGjhmc9M1gzHoCG3nuTY6Ksnfd7+i8UDk -GrM/CT3zIl9qHzBjbwnyTB+CdE10/qcMIoA88FJi7Im5L5JdK0ppPuBki5yB2JUX -jjuL70levxqmIkG8M8lo+DhsitvSBPOd4Mpc8z3NynZlX96MSoHx7xLLFnCCoh/l -KOoUBFiLzw8vKD6yDQLyAdBYIUxX5vL92MrsbdWt0eMcMnA7ZA3NapuKEGCrFTEx -mHNOrrEci46GxFUYI7LatTqLPmbPAgMBAAGjSDBGMA4GA1UdDwEB/wQEAwIFoDAT -BgNVHSUEDDAKBggrBgEFBQcDAjAfBgNVHSMEGDAWgBQQD0T4VtCbnvhbPLMsVaw9 -IOOxhjANBgkqhkiG9w0BAQsFAAOCAQEAovbbyvCtwz3uZ32OQHz+Dk1OjmdO0BrF -g9wxBVaOx1Dfn4BZsiNYjpUttSxswom7D7TNJcA2UAPITlGiJHfFNwfHK3ZefCt0 -9OkccRxqOHGlx5J0QqbPrFk+cYR1rTy+qr3CRWbYbQX49OwS70QvUHDUgvVa+dk3 -bZMm4xid+fMyGKZ4dAlojxmxYxAFcQVRCrMHjSRqHSXJ+1ALul6JwFVj5wjW4wYC 
-RnKog5wJc4cYR5/xyzfFtHTSsGf+iiCpIWPP8UmMtbjVlekpC9f3+NJ+lTIv/Zrj -8crQZomLTYYk2f99ldcgTH7eQP2QShV5j+irbOWX2EReCJ4Kf3P5pQ== +MIIDJTCCAg2gAwIBAgIRAKDLzHuf1H4wP0HR3GmF9ZwwDQYJKoZIhvcNAQELBQAw +KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN +MjIwMTEwMTkwMTIwWhcNMjcwMTE1MTkwMTIwWjAnMRIwEAYDVQQKEwlDb2Nrcm9h +Y2gxETAPBgNVBAMTCHRlc3R1c2VyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA3/uh9wMNN8SeFzDg7ZHowA6Oo9Hf2CFRZjscNxV2bK+Qm2HSiuEzj3HB +20b0nkbPXQBUF2vVTfMrsrI4Tfn00Ja4FddFp8z+y3Ol5mpXcM9mtn10hnJcXWIx +d+ApLbfsugEmZst00GsbvyKVDV24owJLllFMnHMZtymdbd068KNt2w4jgbTNpl2w +7e/8mE+fcdTm9Q3sXPDTTbbW/Lbvgc5oPpazPBNfVg/JkC+kzxjTbBiLfooi+Srj +TdJ/Bqk6I0+sntxq0O8xd/mzLdwqGuHoUm001pzw8E6SsmaErQdZVwYv2tm7lvRU +0iTzTRW/UzbP/g1g3AM7AmaPJfKvSwIDAQABo0gwRjAOBgNVHQ8BAf8EBAMCBaAw +EwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYDVR0jBBgwFoAUizkuY+BH48avRNXEnr/N +Ei9cteowDQYJKoZIhvcNAQELBQADggEBAHRZZfTFulWx4oeGoamhGZ/jiOEaM5ii +MV8K1DwTOk9sWGANEFRV78utEJyHTvONcoSDYO97Iar0Hc3SmRG8iKtNqCAsGTqV ++BbUxKqEkkIBXJ3jZ0obEdNIJ73u34Fm0iJeGcqUwqmSWsqLV/NJrs3F/QlTPK4p +JGcW9wkT7kLFugsUKaTxPrVHjfbYMdQ9mYFFOd74Eem/gCS3O8XmEunIH+pAo3wR +7lcv1bHz3b31+eHh82vbFMj4tUUqRq7Z0vLsGHpy3JzI0/aWcBqJi96jYmRRCf9a +i0jsTsUzMBxTp1rOrzjAf9OPxG/8ZvqjIrgv8NXsBtSZDyAH71Jo0Xc= -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client.testuser.key b/pkg/security/securitytest/test_certs/client.testuser.key index 4a4ab0ea0ac6..62e3c9a413de 100644 --- a/pkg/security/securitytest/test_certs/client.testuser.key +++ b/pkg/security/securitytest/test_certs/client.testuser.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA2XidNQczFXdoYCWq3KM8kxoSPCZJtEKp6ee1VDi20kpku03N -qyGAGaUKYohN05ZnmGRGIaMocaaRsHYbcO8Eym8jAxo4ZnPTNYMx6Aht57k2OirJ -33e/ovFA5BqzPwk98yJfah8wY28J8kwfgnRNdP6nDCKAPPBSYuyJuS+SXStKaT7g -ZIucgdiVF447i+9JXr8apiJBvDPJaPg4bIrb0gTzneDKXPM9zcp2ZV/ejEqB8e8S -yxZwgqIf5SjqFARYi88PLyg+sg0C8gHQWCFMV+by/djK7G3VrdHjHDJwO2QNzWqb -ihBgqxUxMZhzTq6xHIuOhsRVGCOy2rU6iz5mzwIDAQABAoIBAQCus55XAzmefRZ0 -Zs4kAV6gLcmIWJnNHeCx71pUFluIBAt4DMC0QW1PLK5zCVy8JY+DdMqCKzhKVK2l -PLw+tUT7+tv9mgrFAzchPgvRgzQHe1p8z/Lsep6Qn46psGlesmX59BsCfZu5xXIQ -G+cUPaob2pcHwI+UZNtKbQsWoYjLHoiabvEK91Eb2L40rspKzbW9YbiI2RFNUYi8 -2qT5K5a6maHpb3JNL/UAJXxo8O7hV+CFRw9dIPxJiaFeiNOsE1y3YCVapsR/e6t8 -YvEPeck/H9oQrIZIoyWhi6iQ0Nec25THjUkp1WnP81DpwBqkiy5ZDYuBIBlPw8RF -adUnIcoBAoGBAPM6PC19+ZDsvmxyYh1+MmIcunETP/Tx4USQYGacFOjAIdOfQ9/4 -F27kDKmjPK01o6BuLxPBAHzPgmU1Q5JhRrPt9/wNVwwae4JcyTubi+0ru5iaBOD5 -CJXdWEQwqFfIu0a1qyMZVKPxoCt6HenJOjlysCaF0pMtsCHgVqIgm5uvAoGBAOTk -IWmujY1MT8C3oB8si+0AAf0KVnHKvIfPb2TMX58YCBjIfyOX2GnP0m76ejZsXTIN -G4FLb5ICh1gnJvS2jsPNCno0AJTwfh2fEgE3dE2pxYveNq+JdtcmoctIduB7b2R3 -ZITkgZpjgJw8C2EBAvGOGj8McPK8Xopv5S0qOQ7hAoGAS5K9nfj1cUVOCY4k1Dsg -1oFMoNO1XJtHPjGJ8nfuuz7vg7yJr257gXYURSU2Ni9jbvatY/rk9uLXBjFdlH7r -I/79vXA7qsQTujawQ/gZqxCakljQyUI3m7ERuuZPeC8jmV3bFeXJ+cKa3MdtwfVH -RRBQkTelbRuWDYCSCoANOXsCgYEA01Ap6L9Vv5UsK+da1Gzz4aGYNFxO4k8hLuC7 -Hnvv65jsILU5a0rMaSOMSxjDq2rvvz2KnxtdrZbYDPJYbKP9uZaajadkwhwk0oQA -w07G+Mw7rJ1CL1zhclAgHuSPmz9Z3466JlYvsLEcEyD8AFpGtWtTn9GElRSC05vq -IIBSnQECgYEAiFeNzQfjOPl/8tl4BQdBFyLDSgMruu0U5baQzeyZMBONgy2C+XOo -U5EJ66nRVROVC6P2ID6QKYBoc3ojr8e2ZQIdS/OesB8ppLRnCZ2JITHBnnVzalAn -Jz1MRphPt2ewdT2tGv2O26W2fxfs8AWk1zBakw4jwFNt0hkwbnNZC7U= +MIIEpAIBAAKCAQEA3/uh9wMNN8SeFzDg7ZHowA6Oo9Hf2CFRZjscNxV2bK+Qm2HS +iuEzj3HB20b0nkbPXQBUF2vVTfMrsrI4Tfn00Ja4FddFp8z+y3Ol5mpXcM9mtn10 +hnJcXWIxd+ApLbfsugEmZst00GsbvyKVDV24owJLllFMnHMZtymdbd068KNt2w4j +gbTNpl2w7e/8mE+fcdTm9Q3sXPDTTbbW/Lbvgc5oPpazPBNfVg/JkC+kzxjTbBiL 
+fooi+SrjTdJ/Bqk6I0+sntxq0O8xd/mzLdwqGuHoUm001pzw8E6SsmaErQdZVwYv +2tm7lvRU0iTzTRW/UzbP/g1g3AM7AmaPJfKvSwIDAQABAoIBAASGZONEoIO76SW2 +yxSBmh4nLSKKHueS5L4X+53xRQ81DMrW0xYTLqN7PNtdN5vq+k16sDg46XpFq2BU +0WZh4lxEbzuhubqGHa+mind5NoME7aJKLox4yvzn+u/dC3fs+09WrpvtCFMdltXp +sPEwL4a7iSNkSRPwD1jv8kpB6erqmJ9+SY35hLqtf0EsAxgY8AGds9MxvAk2/B7/ +Kor7wuKHriPOUaDoRigpLSOWrw4pOPitSBhbUcjODJpD29HuDW8mmsjv803MJTGJ +wyd74Qlbcz34GBIWviUXUWfljuGL4M6gG3sXBYsKqBcGNvilpyhdwrmZGFHg6ic3 +iVuyX8ECgYEA/3bfbATAUF3z4KHnBDAWWYzl29AVcvmXuwK8mw2g6nR3GhLg6e+6 +hDZADrl+/F4vb5eQ1JyK+scI/WB8KhjdLLNv5ygPXVKIlc4QkgMwX6jEbHXbI4h4 +PB2zQ8298Pf6XkVPdKkZe4E4Psmxqqxxb736d/iJEs9RFAC8MtK33KkCgYEA4HPc +iEJa1KQE5W/A/czC66884TTpJ5v/Ht7WXABt0+JOqxRrQt+zhrzxgjtEpGkfslnl +JfAIBiQ+PgkXSDVczcjPJ8a1B73tYI8LBqEwt6UQ/ktMEOIYSVmr1yp3UJfmoN59 +3WE4AdAWvk/upOJC5gIcCUHgvmSgW5K9lbS7UNMCgYEAn0jgT/q6aqHaMSLh5zOQ +i++VVrRs206S89DmBou93NIXfRNuV47ZLhyhXkz8x6B2VU8cx+R/p5O4oDurz5fH +OFr9mBTbV6Xhcf6VSGVioRKavsHRjFtIFLu0Db/YAcqsumDfBO926xIMHuIlvDRf +WnwLEwjNdwP7GszGi63lZFkCgYBLoV7HHyzCB/6KXQy3uH5ZsjOeYxjJOvxNJ6Mt +Xwui4NfHN9sorn4swY/TZSstBysiCr52+RmLED1U4/VPZIO/55E+AuvPDwVkiu6Z +LklfKCTAuxiHe9fZJ3kKyIlpw7V3sWDR7sdTfQ2c1QxBzOfj3wQZbnRPU1LhaGGv +hzWy9wKBgQCD1VELtjlzZZ4A+9lOOPLM90OHggmfVtsk2dGKGTg/5e+VWeqX8z7g +NXdSWKTG8S0wp/Yoc+o/cqwjes5BXXPry35wB9pFsGm415cGIOtM5oCwUJfsZ0H6 +0shHW67yAvWwqSxA5DjNnfBPGRSiDueSkGrF2ZxLbKhVAws/XccSUA== -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/client.testuser2.crt b/pkg/security/securitytest/test_certs/client.testuser2.crt index e3b3e6adb399..c35f0a72143c 100644 --- a/pkg/security/securitytest/test_certs/client.testuser2.crt +++ b/pkg/security/securitytest/test_certs/client.testuser2.crt @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDJTCCAg2gAwIBAgIQEJ94O2WOO8dJFysb3a6ggTANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -MTEwMDUyMTU5MDBaFw0yNjEwMTAyMTU5MDBaMCgxEjAQBgNVBAoTCUNvY2tyb2Fj -aDESMBAGA1UEAxMJdGVzdHVzZXIyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAqw6D0/F1ltr13dI0uUzOhbdjfuiNX+Y23TJeLFGLbI4Eb+M7HmmdrNIW -cQsIKuOznieYl7P1UlxwDy/ASr4L11z0jVNng7HxyV0VgXu508GJLqHpN6DiY7+I -jRyRT5I2GvvXZJinxZGMQ9EMDDf6qzaLyaWt0QOJH2Dnoz/DQJJqbBqOo6YpoMOD -Okfa2qXdbo5Rf++tJetMAAywid5TltqBFMSTBf2J4jATD0O0S51guS22ITJFTqbc -qxRR5t7/iWbEgCXfcb0No9cZJw5/FrO8wx5zQM5OS29Ij25bd3y//rylnF9np+yX -h8Id5eugd1hY0wvBb0KGcUL1J62qEwIDAQABo0gwRjAOBgNVHQ8BAf8EBAMCBaAw -EwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYDVR0jBBgwFoAUEA9E+FbQm574WzyzLFWs -PSDjsYYwDQYJKoZIhvcNAQELBQADggEBADl6utEAYJiD0pT8OPjT/hDk0bhv2Bxs -5M71L2jas7Va/Yqg0GqqMZvf9nGyaY2imuyDqu+5BFc0vB7l6DqwpY9VZeyvwOVQ -eGn8YXocrUFEFpjoJ2H5lnuC1xKQJvsHwvgwLNTCWN9U5aBW36VFZnm+UVXnTZA3 -TUaSaa28eTVS3fov1TzfHlrlWu3rGq0m3FacKG8mpdRpSEx5iFpM6qkzo8LC4Iwp -Xua9g21ckeOBWhiFPSPuOC892JcH40/8kvQTXrmQEN5erzIkqtGiy7xFvbYVS+GH -avmt5r4NH4Hzm9tNbpNsRPTLNXlP8zAyaHg2LjSYgYRx1xQEO8GzJHI= +MIIDJjCCAg6gAwIBAgIRAKFvTOA9+71Z6FxeIzdsUMgwDQYJKoZIhvcNAQELBQAw +KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN +MjIwMTEwMTkwMTIwWhcNMjcwMTE1MTkwMTIwWjAoMRIwEAYDVQQKEwlDb2Nrcm9h +Y2gxEjAQBgNVBAMTCXRlc3R1c2VyMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALiduQvG/etVIE4hrs6m9IR/+fdpVhcEmBetr0h4C9Du84bQn4q+zAhB +j/hrUaN6HR01hFaNLU0RpJBdTi8A/AwFX8KT0BV6EYSRkAbrt5qvw9kmd+AdHogV +YVSGH0amHUH9Z9VBy5/mmO9ONaor/rCvuXdq3YLXTrEyF4q8ChaE0LQZ4+knMX6Z +0CnQXFAXRQE+jFpBhklqgwIpeaY1wQjI02WZtxTL15j1z1BFrDNlF16yPoNVUjOt +1p8LM1418PvOE+KWWUAP1WmxOzGyCFv3VQiCxFfUeFANt4DqQnMufZURDnxTLydm +VFvqJPTTPj0JHoxoWBPzCdZX8jkS0xkCAwEAAaNIMEYwDgYDVR0PAQH/BAQDAgWg 
+MBMGA1UdJQQMMAoGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFIs5LmPgR+PGr0TVxJ6/ +zRIvXLXqMA0GCSqGSIb3DQEBCwUAA4IBAQC/2EZcZwiB0zxVaUh+ZssB9FZbipSZ +kENeSLTNAG3POM/4FdvkWh+YsLJfu6rASmzde9lt5gDb/3Jo/i453X33n/SUNaLf +ISBYM8H1BraGLD0IyySy69JWL760YyM3CxQurf+Gl/27FfWx2mj0q2LcTzV60NED +gL+BMKvwCd83UeYhzcnDmZ52QIB5rwifqfDhEWpdoIkJAtki5bZiCSD4gc61TuPi +Ov3x/o+PgdD1bceemv90MZv9Y3oE/X6Ft9bU34vWB08ZJqliY8zjBmwWfCK5Ke18 +NO9gDuBh6MgyOMylERufsrt5QEcwGODH9DQkNqxq1njL4bKiqFuHUDeR -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/client.testuser2.key b/pkg/security/securitytest/test_certs/client.testuser2.key index 1f3d5a26fb6c..8483635b843f 100644 --- a/pkg/security/securitytest/test_certs/client.testuser2.key +++ b/pkg/security/securitytest/test_certs/client.testuser2.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAqw6D0/F1ltr13dI0uUzOhbdjfuiNX+Y23TJeLFGLbI4Eb+M7 -HmmdrNIWcQsIKuOznieYl7P1UlxwDy/ASr4L11z0jVNng7HxyV0VgXu508GJLqHp -N6DiY7+IjRyRT5I2GvvXZJinxZGMQ9EMDDf6qzaLyaWt0QOJH2Dnoz/DQJJqbBqO -o6YpoMODOkfa2qXdbo5Rf++tJetMAAywid5TltqBFMSTBf2J4jATD0O0S51guS22 -ITJFTqbcqxRR5t7/iWbEgCXfcb0No9cZJw5/FrO8wx5zQM5OS29Ij25bd3y//ryl -nF9np+yXh8Id5eugd1hY0wvBb0KGcUL1J62qEwIDAQABAoIBAESNv6V8Oq0nuauZ -qAmvT4FletmMp8QMRmal3foTADaUQBD7V2Eb4NY3KElLq41MzlqO8eeydpn4c+yS -LZWGzsYT+ep9x5mPAiC6TnkjNdk40hq/RvFFaBWj2Dz9evsR1N9BmRQ23d4fM8Cm -Ehb/hFWDucgpvxxqN1U9B/h0AHNq0qvbQk8KhZ3pkAdm6OPiarLj8+JIBz/Isqfq -akRDd6x/onvYCjhmGv/quDJAoOXUrq9L9o6GkhkwpGp34CQ2ZBHZwdyNlsQqzrgO -eziyaSDICSPfr869dKcgvIEx+6iX4vbFYh8VHkPWPSgLt7Vk8uDBx9n11D20E4l2 -OSS5hAkCgYEAypqDisbqmFHIaXttcH25LvhHEjcx0cmjTy4tY+rrXt/v8v16Jkh9 -LdvyQWIY4PrnnaAyxdW1v9yk5Nf7792DOdBhlYq57qjER+aVVdY368Kqh8AOPNJh -bWUR86morfLNSeHhecO04gwnDhKarpB9CWV8hAa+/7MdGng5LiugcI8CgYEA2COP -rxJhey3UiHM87vNsDBFBiatmU0l9Z9L2lSjocxAROwYkFwXC1q2T1WPm93iyDaQy -hrFyUe0QTzkhu+Q8yNRhmIN8nT9cVlMmsX8ylkyWemaAgjzmMXiXmgXJI6xb/nbV -1z/N9Tq6Ow+uZEKfvZoN4osnvwUoDRKWQI6bqD0CgYA9IU3bS5VxDAwKN6GXtXdw -gpz/FARGJi7OL0KmyTTnVW4SxQRcFLcgZSK9M5oKF+pWlwV6x7alAzAFlF2BzDZR -9VvcQRVEu0st3pbkZVROIFhPyVUCaP/YgAxglqo8gewLBMqBcNlc8wm9p0NgtFvl -v+dH9rkeca6wU+H82lD4ZQKBgH9/2s6nxSFo06r6b8N1jRNBSqvvlBkljPimFhe/ -XgAiRkfPQ5LhLyJ8mmyS1ji91ir95WBXZNy3P5u8VA4GgWTnA/t39Ra9vbaeroCe -TDUlUM87kCtDyXb9cFDHgPOuRUZCy7qCRPiYgCs/ANzcAGdUbURqWjPw0sODZ2cF -uQTxAoGASE4BrjwNetr3ZlhNxNNG0qwZb3UJBzZ1PeyyU0eF3exfgROxsXTfLOAl -jRl6oDxY4UP8CNYumbJ0S57fDux/ASLuR9xkuPQW2CVKYKzvQ5ZfQNnPQxmUAy9x -ye/ABIhQUnp40Yof31RzkRUlWBxPra3wS3KUW3xKg8nkH3RfrSI= +MIIEpQIBAAKCAQEAuJ25C8b961UgTiGuzqb0hH/592lWFwSYF62vSHgL0O7zhtCf +ir7MCEGP+GtRo3odHTWEVo0tTRGkkF1OLwD8DAVfwpPQFXoRhJGQBuu3mq/D2SZ3 +4B0eiBVhVIYfRqYdQf1n1UHLn+aY7041qiv+sK+5d2rdgtdOsTIXirwKFoTQtBnj +6ScxfpnQKdBcUBdFAT6MWkGGSWqDAil5pjXBCMjTZZm3FMvXmPXPUEWsM2UXXrI+ +g1VSM63WnwszXjXw+84T4pZZQA/VabE7MbIIW/dVCILEV9R4UA23gOpCcy59lREO +fFMvJ2ZUW+ok9NM+PQkejGhYE/MJ1lfyORLTGQIDAQABAoIBAQCpl9phw0+HXA/t +NmwLUrvU7GuIqK95PbMqLVeUTxMrwBbehc/J+TQdcXz8TDoW3xrXtk335ID1B3wR +UmV8MH9Z26X4bSj+UcC986pHcUqdQ1G6ref5bUaa5Gkg6ITatcay1EMKWQLhxUhA +rawGw5uYXBUYaodKpteXV9jgjZUG0TJsecpyP1igRLEiU9M5P+IP5QhrPGsM+QPz +X+tQlin4utROuaW3avPxBuS/UaJ9hGQjgurpR9WBwAf428/rTxArSzmPHWkh+S5x +WK3MfZn0YSq3RTIWeIb6spni0CDzOx4hsGUwk1zxEHptvmcr/o3DpqHsPb/tAown +SCw7rvkxAoGBAMX6qC/qsLRHTQzERhTg46fNf01NI/m9Ua8+X/z2vLNJiM0YmCdS +Tuuu1U9uRFNaWD/wmzczHW0PFxdslRMxootc6rCIgkFwuCl8MJaY3tkDDujoelBO +SSCeXNcx3VxotpueoiQlh4oy2PPXcj88uJTCPbg4tT8sfxvc8yryF6ujAoGBAO64 +gfwyQ57YGIXuTdtoHNvy3LilcWjswgb1sQiu2rDIDqkaT7t3yEX+Ukbgdt7Ufx3q +QitwdJZ4ZuBtuJwjZTQDS0HnDVbl+KzrpvwI0EzQsBfLIBKhDENsQQ7BAEMU+r4s 
+L/zKHPCtQx5cUu7pJnnHCyAIwpMZ0o/Sj6SLu/ITAoGAbBGdSTq05lOdbYCeOLth +ybFU8h9Pqf1730sPHoiZDMzxDfOE6sH1LpWq/sbUKSPB1HVEZOdUArogArtUzLtl +XOmFeoOphos/G/Ycl7guvQr8UorEaZ2yMUoAp78idFT8iQoYu954lCmZX9GVHYvJ +vfohsrPRzABACjebzS+FWD0CgYEArt37YclUHVyAgkMxRyJ52WiK5LtUWx7rdnut +ZgXn7o1tp9O9Sj8RNqx4irDMgqg4QaqjM/zZeovSGF5nWADZloM/MpRVAi3NvqWU +mZS/OTW5eIR0BxFv0UfQVEVusrUAhCQMNum6z1asDuZkXdvuMlBqxtmD5ouI4Y/F +ZyxwzX8CgYEAjuuYnHXJWhRA8JPns8PZ4y6vfI3RUkj4sjBZAzxN3Zwn1FTnbKQf +ODG2HuOCXbvLe/3vqyItnHTx0yeIc1CE9Fz6MjLUcmTVuwdcIY2VprTok0SMIsea +oX240ucbS2rp3p1j/MZXXHQJtRzf1BzsylpdxShQzeb7M5g/XBlQqbU= -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/node.crt b/pkg/security/securitytest/test_certs/node.crt index dd0c3cb21b8c..2c513d3a0930 100644 --- a/pkg/security/securitytest/test_certs/node.crt +++ b/pkg/security/securitytest/test_certs/node.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDZDCCAkygAwIBAgIRAOYT6VqP/TnPG1KfyBnhQd8wDQYJKoZIhvcNAQELBQAw +MIIDZDCCAkygAwIBAgIRAN83bo8ZydHRqc6pm2T0rIgwDQYJKoZIhvcNAQELBQAw KzESMBAGA1UEChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcN -MjExMDA1MjE1ODU5WhcNMjYxMDEwMjE1ODU5WjAjMRIwEAYDVQQKEwlDb2Nrcm9h +MjIwMTEwMTkwMTE5WhcNMjcwMTE1MTkwMTE5WjAjMRIwEAYDVQQKEwlDb2Nrcm9h Y2gxDTALBgNVBAMTBG5vZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQDI5mwFSZ87Y87Pa3YSsHeD1TGqtzAyoFrdeW6RDtzD5UhjCennOM1epmNnW/zQ -O4ixX+mQHb9FUmlopGTvJqcZxqyTRuwVpABuhw/guYmiYjVMwfzHIt6X3L4/Kd6J -lICSo3zQKG9AVHmdt88M4r4VmdJYuOf+npoAxSkdclq/H7KtsM1uEDlwvM0cjrRa -K85iYFMb1WiYVpUt0ZGAlC0ISCj9RIZlVewHfCmX1iuiY/8DGNnOahRaDHjOoIzS -f2IOm8XLedttl2tvHTDADHAqYBhmfX4PXr6xHcwtSqQ1iCZ7NnkdQA320CUkCxIT -O9E9XARHdpDiiosZquIMbwW9AgMBAAGjgYowgYcwDgYDVR0PAQH/BAQDAgWgMB0G -A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAfBgNVHSMEGDAWgBQQD0T4VtCb -nvhbPLMsVaw9IOOxhjA1BgNVHREELjAsgglsb2NhbGhvc3SCByoubG9jYWyHBH8A -AAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBALPIWm3O7kiR -NhkEsmQeZdV8x3lrmxhMWnzX4N/9xhPc6ybGesvqVv/cU1WcBVAwCgfeTJKaBGn/ -J1sHbtC8K5RfDeRDThfuE3F3U43ZfyzD5RU6yjPRI7Ih2dEU9/nUJBf44twudgMQ -iPmsnsUiGoduo8AvxHBQJTAWQkIs0/zI1e/a+8qknLoYZ2mZaWqBdb7songZWWIa -76J/zNmRcsSrsEKcfu6OYmYHPbGE3wFk8+31Rt1DMBpELqpcDjvGi/Ojof6ao1It -2ERip7nvPFyGaP2JcAQ9h+8NA4xNBIkds3/c1508e9lzB8SxVatxvPuALjszD/J/ -xWSaqjLIhnY= +AQC2ymvgVD1xJh7Gy8qNJrlVgM3TNezpnW5hbPa6NHEV0Asj4+hq2hEaDBsG1aUA +MA6B355uzeonlcSLffUogKi4vMCjHS6s/Nv5Nwo/JowtS9Af4udrnQN30R6dDJpU +NprHFv2BRCRnWmQWnq8ALsitB6svi4QJf9Q3qwCgoOTaZody57glus6ABZsmIsPg +BaTqKyzkif8GVVfZrGBJKp1vnl5R493RV7vP8t6tIZd/PW2uw90LTaVdqhjBYggm +jVy1pjklXOVX0XdQIdd588868Qd8TLzV8vvR26uPyKZ7rCQltCvf01N3fgo4aiz8 +4xZ9VAv82Oo0IEbFc+S1IENFAgMBAAGjgYowgYcwDgYDVR0PAQH/BAQDAgWgMB0G +A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAfBgNVHSMEGDAWgBSLOS5j4Efj +xq9E1cSev80SL1y16jA1BgNVHREELjAsgglsb2NhbGhvc3SCByoubG9jYWyHBH8A +AAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBALuPAJKeSDhU +Vt3w3EeSlKnxL34WHxX8mOZzGkZAaZyBFUvFAdglVStLS4U3tKmtOH6b9vk41pbo +n8vW56aChzAoyJITEpeZriBXIPb48WIb2L039nHFW/dnXJrgV1EPk+7XdGvDBeqO +VgHqrfnLZO6wP/yMWa48tAa6PRH+x62VvjEd/fm6ZxKiy62jnhNV874kPhT4H/jE +vD790LHzzilpKp2Rjv18BbnwVu3wv93V5Ka9FXYwP7OMtMArRFQPKQBnAYyCxNAA +G5V042U1CbTLKM+xbJ7AL7ZhF1Aqn4c0x2EX08D4pF4of+9ubpSvrbjSEseZjrBx +txBzRZsA8rQ= -----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/node.key b/pkg/security/securitytest/test_certs/node.key index a0ffaa050ceb..c1a9595c7c52 100644 --- a/pkg/security/securitytest/test_certs/node.key +++ b/pkg/security/securitytest/test_certs/node.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- 
-MIIEpQIBAAKCAQEAyOZsBUmfO2POz2t2ErB3g9UxqrcwMqBa3XlukQ7cw+VIYwnp -5zjNXqZjZ1v80DuIsV/pkB2/RVJpaKRk7yanGcask0bsFaQAbocP4LmJomI1TMH8 -xyLel9y+PyneiZSAkqN80ChvQFR5nbfPDOK+FZnSWLjn/p6aAMUpHXJavx+yrbDN -bhA5cLzNHI60WivOYmBTG9VomFaVLdGRgJQtCEgo/USGZVXsB3wpl9YromP/AxjZ -zmoUWgx4zqCM0n9iDpvFy3nbbZdrbx0wwAxwKmAYZn1+D16+sR3MLUqkNYgmezZ5 -HUAN9tAlJAsSEzvRPVwER3aQ4oqLGariDG8FvQIDAQABAoIBAQC1tRPGxIwtH10S -5lNpHAs9Rjsh+Yayg68r2oVS1mp/FIagbXsOE6ZNEZmfBuhnPzGnnOSX4Q/7F0sX -xnk0hXVmLvgwTVmDSU7KqYaG78m5AbLLtjl3s+Y43QOGBcyuC48vEof1jlzxhnZp -XLL9KKhcyUAj1zqeZb1NLQa/B4tkiR7gX5kUY1Xs45Ihl8zGA2/MkyHEbO2a3dkF -poGMhwf9xUoAs+HZ08QsTLEAFa66epy2OoRniWAIlkDnrGiRZCkkAgF2v6SafdVU -lg+gJhtyJXwIhf5GTgVys60eBid5uj5xk6AdvX30f1XDhmPrYOzO8bCVoVlRVWzl -0bj7f0IBAoGBAM3sUZOZl0jHdMQfUtF1bZT4Wbsbni+CRPvtHBd1rLhpurKiXpJw -/v/i2Z/N5m1Z67vLn+1A8AQ8lY8nB8w21F68BrpowCmZtVjJgi0W+rDN6Dfo496b -0UZ1XCHdr+9DmPb1koFeCLzZA1j7toM4JrRymGOLDCvvUVBjYZ2vRaSdAoGBAPnB -ZVXDjuvbmOvkkvFT4+knZnXm07E34OaLN/EfyYhlWWD6JkHXMH5yIJtAJ+R+tTZy -XdexVQ++bJfQaYD+Sd0yF2VrmZmq9xAY1Uy1Fdfs4l5doeKzvuUUsU54FURuxjDn -oiEZAVVbhLdG3z6+UNTtmOiWDnXv7e9KygGkYsuhAoGBAKy2E2mffaJ+H7JKr1Sc -j+9NKT1kmZUxRpihs1j9yoLVstAWnQloGogWBP8iW6zFqZDV5WnBeE2lXq81RjYC -mrZmL6JzLxD1KYOxZyxYRKS73pPeEhjoSWs2DbMd02zzHj2rlFhKvTtfLI6ad+aw -1s2r+6IpwmirDWFGKV5u3hOFAoGBAPkdfw8pjTASjJiWGD9gNs9Dfn6Ra4uzub8b -FASNeT50IM+LE4fOtU8aVQ2l02SlQej58EKJXdfjT3up5OJGQbTBFvcReVb+YYqL -yo3FffEvFaecazGtqfXjSuuC+WuVtlfnYx7ySxMxwtHwfhKeUA/iogc0TV4zoDdz -eMRkjCqhAoGAW7NagzeejaqRmWaTVfeHnLtnsOQVgIe/1Dkr5wYxuVNMAai1sdh1 -RUAu2gzrExDGp8RGgFJih9y5vC3LuAz/WUZYoEb4xxNEIEWq7tXdO6HIuG7EV26S -NPvtNgv+1qicUlmMzwX6y2vxc6+xm9EAXRxDYgCKYXezMj79SJR4jis= +MIIEogIBAAKCAQEAtspr4FQ9cSYexsvKjSa5VYDN0zXs6Z1uYWz2ujRxFdALI+Po +atoRGgwbBtWlADAOgd+ebs3qJ5XEi331KICouLzAox0urPzb+TcKPyaMLUvQH+Ln +a50Dd9EenQyaVDaaxxb9gUQkZ1pkFp6vAC7IrQerL4uECX/UN6sAoKDk2maHcue4 +JbrOgAWbJiLD4AWk6iss5In/BlVX2axgSSqdb55eUePd0Ve7z/LerSGXfz1trsPd +C02lXaoYwWIIJo1ctaY5JVzlV9F3UCHXefPPOvEHfEy81fL70durj8ime6wkJbQr +39NTd34KOGos/OMWfVQL/NjqNCBGxXPktSBDRQIDAQABAoIBADd9VahBCnHp55fj +z7Zv1f1d353JlgUJVLPgtzmpp9a+VFNt4WVmk6B7oky92Jwo+o50iw0KF5Yywfqy +nxTPkeia7EPYHQ5IqKKMEeE/23f4ttKnOCeT/7SE2C1G4SbFeNENaqGuRPrXFuFD +BM8iZKsaU95YFRopIwxPLh6VGUQvLha1/kP53aV8cKOiVCDN1L6KaVNn9vUW6Caf +mg5+fMW4u+CGEYWeI738EEkqjMaYsSCR08HLC3k0LW+kMidpvsPKOm5ZMEAMztX1 +yc8GAaFBq6nz5+BtNOG0Hocg4rXFy6+Tnil/NEaDnkpVPAMRpO5trGVTrNPL057k +WKSoZtUCgYEA6lwuyDui2Vrvk54UoK2nI0DwLeMNYl3ETNxfJYT/kW2n1uzT9sJ1 +5KjzTsSIuKdZYq+aqSi2qESUPcv/1GE/BrdQqh0AlKegAdY6lp9ej2LgGytkmFxI +3GPoS/ABpp8OHlH3q+fMmAgl0z4qWTwHWUvGpV8eQDdBzWUdUIgrM2sCgYEAx6s9 +GaGlxpkwWtXGkjqrVOPaYBgG6GIhnqNIkBBmwwUsYoALzBd+tERM2qPwIEkrtLdm +MNCNUlru4lV/AKCSB6LVR3tMaoKnd/0EA+pftkK/REH8aTw1QXPUxYGY/LawqXWA +2WjlUWw7ZNna3SfXgW+KWn6xE2E9jBBKIQYYwA8CgYAf6YWFRnmaV0OgOjpc1siX +iFQsK2q7JkGApdFe7olOaDwejAkg5MHg7RCUfTaQzljhkz/gIOceapg1Af5IESXf +6D5Xq7NUiq7DEUTRFcpug+w4RuRfytExEXmkPX48DhSCFG9BPUMiwJlF9oUVuZLW +mfbmtdkMrXmMWmRvfttDGQKBgGVfGfk+aYTn13X2nQc2xC+oMwGgkTlAQSNicP+7 +ZADVSpCDw/mNYCWzm3VR0CMEIy1wA3D7IRTT1/6PO5ic7Sb1U+Ujw0s8JDw19+jp +AEjvoF3ORpFDISKm5TqVLo/3TL/sSUuYBv0MvybXuFeZ177+WzbQpaRaNT48MvaL +OtufAoGAe7nk37VP/HZ/xqMYUYqdQ6Udn28WK6uVZQ1wG6pF4rUXjJ7qzQTzvYps +cLK50EQH/g1W59BU72dwHgOkC9pLzTr3n2bmL15zcW0LjZ6hob2b/f7yPYz1Euf2 +HMp85FpxKx5Jhs+mwxb0XGAkQj9iUc4GCInB7AkColQBaY921V0= -----END RSA PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/regenerate.sh b/pkg/security/securitytest/test_certs/regenerate.sh index 9f3c4066a8a5..06936c49b49e 100755 --- a/pkg/security/securitytest/test_certs/regenerate.sh +++ 
b/pkg/security/securitytest/test_certs/regenerate.sh @@ -13,6 +13,7 @@ rm -f "${dir_n}"/*.{crt,key} ./cockroach mt cert --certs-dir="${dir_n}" --ca-key="${dir_n}/ca-client-tenant.key" create-tenant-client-ca for id in 10 11 20; do ./cockroach mt cert --certs-dir="${dir_n}" --ca-key="${dir_n}/ca-client-tenant.key" create-tenant-client "${id}" 127.0.0.1 ::1 localhost *.local +./cockroach mt cert --certs-dir="${dir_n}" create-tenant-signing "${id}" done make generate PKG=./pkg/security/securitytest diff --git a/pkg/security/securitytest/test_certs/tenant-signing.10.crt b/pkg/security/securitytest/test_certs/tenant-signing.10.crt new file mode 100644 index 000000000000..58fa08c455a0 --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.10.crt @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE----- +MIIBJjCB2aADAgECAhEAwYXboWr0rlxwzYhqt/W0FDAFBgMrZXAwKzESMBAGA1UE +ChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcNMjIwMTEwMTkw +MTIxWhcNMjcwMTE1MTkwMTIxWjArMRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNV +BAMTDENvY2tyb2FjaCBDQTAqMAUGAytlcAMhAF0oSVEnZtHsrEsPTaVLsqP+N1s7 +QJFlo+i8Yk0ewtTSoxIwEDAOBgNVHQ8BAf8EBAMCBaAwBQYDK2VwA0EAoUoP7SQF +OXPyWEsDcxXxIG7MRDhVV90R7AYUTjOv7OB7foyTtxCXfTZCi9PLL5JQ92qKCgz/ +MB/5DKB+KudkDA== +-----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/tenant-signing.10.key b/pkg/security/securitytest/test_certs/tenant-signing.10.key new file mode 100644 index 000000000000..6b3d066ac488 --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.10.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIFAgC7O1AQOgd49BJJGHO3PezuEGV5OeG7DnoLiaPE5L +-----END PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/tenant-signing.11.crt b/pkg/security/securitytest/test_certs/tenant-signing.11.crt new file mode 100644 index 000000000000..cd3be4714e4a --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.11.crt @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE----- +MIIBJTCB2KADAgECAhBqyjNPz4jSnANHz/6OIzgaMAUGAytlcDArMRIwEAYDVQQK +EwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0yMjAxMTAxOTAx +MjFaFw0yNzAxMTUxOTAxMjFaMCsxEjAQBgNVBAoTCUNvY2tyb2FjaDEVMBMGA1UE +AxMMQ29ja3JvYWNoIENBMCowBQYDK2VwAyEA6LLJUILUOGqH/GBtYwC/5SeDYwbw +xjrJUeaZA2l9Ia+jEjAQMA4GA1UdDwEB/wQEAwIFoDAFBgMrZXADQQDNiES6JpRA +R5S6h/6Pz/MOT4uS7eZ91JY/YJoaaw2t4+QvBXqm+y57t3DBf2EGUK3SHmdFgHPi +etrC0zm+bSgM +-----END CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/tenant-signing.11.key b/pkg/security/securitytest/test_certs/tenant-signing.11.key new file mode 100644 index 000000000000..a11bf6881898 --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.11.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEILAan5LFiNxCE40Ac7oGWUS1nQuBcYHY28bJu0booQI0 +-----END PRIVATE KEY----- diff --git a/pkg/security/securitytest/test_certs/tenant-signing.20.crt b/pkg/security/securitytest/test_certs/tenant-signing.20.crt new file mode 100644 index 000000000000..c4c88f3e056b --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.20.crt @@ -0,0 +1,9 @@ +-----BEGIN CERTIFICATE----- +MIIBJjCB2aADAgECAhEAuH7vVx4nwdKhJsaZRGuirDAFBgMrZXAwKzESMBAGA1UE +ChMJQ29ja3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcNMjIwMTEwMTkw +MTIxWhcNMjcwMTE1MTkwMTIxWjArMRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNV +BAMTDENvY2tyb2FjaCBDQTAqMAUGAytlcAMhABI/EmtjPSxifwP8zHg09u59Ai2v +pMbQ1R0Tf4mwZMw1oxIwEDAOBgNVHQ8BAf8EBAMCBaAwBQYDK2VwA0EAhGkpAAeX +R92VWUazcfN+Dq9IX8PJ8fxgb3KU+JuK652uog1rwM4/NK7RfMtlIH3dQ8/GMBpR +SUCl7JGgrI+LDQ== +-----END 
CERTIFICATE----- diff --git a/pkg/security/securitytest/test_certs/tenant-signing.20.key b/pkg/security/securitytest/test_certs/tenant-signing.20.key new file mode 100644 index 000000000000..767343a82f92 --- /dev/null +++ b/pkg/security/securitytest/test_certs/tenant-signing.20.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIHPb1nVztKWqTGLk22FoU23W8e9q469cYQd/CPZuKaWS +-----END PRIVATE KEY----- diff --git a/pkg/security/x509.go b/pkg/security/x509.go index fb04b6c8dc6c..b530298d6f65 100644 --- a/pkg/security/x509.go +++ b/pkg/security/x509.go @@ -276,3 +276,35 @@ func GenerateClientCert( return certBytes, nil } + +// GenerateTenantSigningCert generates a signing certificate and returns the +// cert bytes. Takes in the signing keypair and the certificate lifetime. +func GenerateTenantSigningCert( + publicKey crypto.PublicKey, privateKey crypto.PrivateKey, lifetime time.Duration, tenantID uint64, +) ([]byte, error) { + now := timeutil.Now() + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: fmt.Sprintf("Tenant %d Token Signing Certificate", tenantID), + }, + SerialNumber: big.NewInt(1), // The serial number does not matter because we are not using a certificate authority. + BasicConstraintsValid: true, + IsCA: false, // This certificate CANNOT sign other certificates. + PublicKey: publicKey, + NotBefore: now.Add(validFrom), + NotAfter: now.Add(lifetime), + KeyUsage: x509.KeyUsageDigitalSignature, // This certificate can ONLY make signatures. + } + + certBytes, err := x509.CreateCertificate( + rand.Reader, + template, + template, + publicKey, + privateKey) + if err != nil { + return nil, err + } + + return certBytes, nil +} diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index eb49543c15f1..6c195218fdc2 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -130,6 +130,7 @@ go_library( "//pkg/sql/catalog/lease", "//pkg/sql/catalog/systemschema", "//pkg/sql/colexec", + "//pkg/sql/commenter", "//pkg/sql/contention", "//pkg/sql/distsql", "//pkg/sql/execinfra", @@ -200,7 +201,7 @@ go_library( "//pkg/util/tracing/tracingservicepb", "//pkg/util/uuid", "@com_github_cenkalti_backoff//:backoff", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_circuitbreaker//:circuitbreaker", "@com_github_cockroachdb_cmux//:cmux", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/server/admin.go b/pkg/server/admin.go index a98d5d6f38d9..19ce8da00ca6 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -22,7 +22,7 @@ import ( "strings" "time" - apd "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" @@ -1662,7 +1662,7 @@ func (s *adminServer) Settings( ) (*serverpb.SettingsResponse, error) { keys := req.Keys if len(keys) == 0 { - keys = settings.Keys() + keys = settings.Keys(settings.ForSystemTenant) } _, isAdmin, err := s.getUserAndRole(ctx) @@ -1686,7 +1686,7 @@ func (s *adminServer) Settings( resp := serverpb.SettingsResponse{KeyValues: make(map[string]serverpb.SettingsResponse_Value)} for _, k := range keys { - v, ok := settings.Lookup(k, lookupPurpose) + v, ok := settings.Lookup(k, lookupPurpose, settings.ForSystemTenant) if !ok { continue } @@ -3087,25 +3087,78 @@ func (c *adminPrivilegeChecker) requireAdminUser( return userName, nil } -func (c *adminPrivilegeChecker) requireViewActivityPermission( - ctx 
context.Context, -) (userName security.SQLUsername, err error) { +func (c *adminPrivilegeChecker) requireViewActivityPermission(ctx context.Context) (err error) { userName, isAdmin, err := c.getUserAndRole(ctx) if err != nil { - return userName, err + return err } if !isAdmin { hasViewActivity, err := c.hasRoleOption(ctx, userName, roleoption.VIEWACTIVITY) if err != nil { - return userName, err + return err } + if !hasViewActivity { - return userName, status.Errorf( + return status.Errorf( codes.PermissionDenied, "this operation requires the %s role option", roleoption.VIEWACTIVITY) } } - return userName, nil + return nil +} + +func (c *adminPrivilegeChecker) requireViewActivityOrViewActivityRedactedPermission( + ctx context.Context, +) (err error) { + userName, isAdmin, err := c.getUserAndRole(ctx) + if err != nil { + return err + } + if !isAdmin { + hasViewActivity, err := c.hasRoleOption(ctx, userName, roleoption.VIEWACTIVITY) + if err != nil { + return err + } + + if !hasViewActivity { + hasViewActivityRedacted, err := c.hasRoleOption(ctx, userName, roleoption.VIEWACTIVITYREDACTED) + if err != nil { + return err + } + + if !hasViewActivityRedacted { + return status.Errorf( + codes.PermissionDenied, "this operation requires the %s or %s role options", + roleoption.VIEWACTIVITY, roleoption.VIEWACTIVITYREDACTED) + } + } + } + return nil +} + +// This function requires that the user have the VIEWACTIVITY role, but does not +// have the VIEWACTIVITYREDACTED role. +func (c *adminPrivilegeChecker) requireViewActivityAndNoViewActivityRedactedPermission( + ctx context.Context, +) (err error) { + userName, isAdmin, err := c.getUserAndRole(ctx) + if err != nil { + return err + } + + if !isAdmin { + hasViewActivityRedacted, err := c.hasRoleOption(ctx, userName, roleoption.VIEWACTIVITYREDACTED) + if err != nil { + return err + } + if hasViewActivityRedacted { + return status.Errorf( + codes.PermissionDenied, "this operation requires %s role option and is not allowed for %s role option", + roleoption.VIEWACTIVITY, roleoption.VIEWACTIVITYREDACTED) + } + return c.requireViewActivityPermission(ctx) + } + return nil } func (c *adminPrivilegeChecker) getUserAndRole( diff --git a/pkg/server/admin_cluster_test.go b/pkg/server/admin_cluster_test.go index 269d8518a416..04e38252c80e 100644 --- a/pkg/server/admin_cluster_test.go +++ b/pkg/server/admin_cluster_test.go @@ -55,12 +55,17 @@ func TestAdminAPIDatabaseDetails(t *testing.T) { require.NoError(t, serverutils.GetJSONProto(s, "/_admin/v1/databases/test", &resp)) assert.Nil(t, resp.Stats, "No Stats unless we ask for them explicitly.") + nodeIDs := []roachpb.NodeID{1, 2, 3} testutils.SucceedsSoon(t, func() error { var resp serverpb.DatabaseDetailsResponse require.NoError(t, serverutils.GetJSONProto(s, "/_admin/v1/databases/test?include_stats=true", &resp)) - nodeIDs := []roachpb.NodeID{1, 2, 3} - assert.Equal(t, int64(1), resp.Stats.RangeCount, "RangeCount") + if resp.Stats.RangeCount != int64(1) { + return errors.Newf("expected range-count=1, got %d", resp.Stats.RangeCount) + } + if len(resp.Stats.NodeIDs) != len(nodeIDs) { + return errors.Newf("expected node-ids=%s, got %s", nodeIDs, resp.Stats.NodeIDs) + } assert.Equal(t, nodeIDs, resp.Stats.NodeIDs, "NodeIDs") // TODO(todd): Find a way to produce a non-zero value here that doesn't require writing a million rows. 
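A minimal sketch of how an endpoint consumes the reworked admin.go privilege checkers above, assuming the statusServer/privilegeChecker wiring shown elsewhere in this patch; the handler name and the empty response body are illustrative placeholders, while requireViewActivityOrViewActivityRedactedPermission and the serverpb message types come from the patch itself.

```go
// Sketch only (assumes the surrounding pkg/server context): read-only
// activity endpoints accept either VIEWACTIVITY or VIEWACTIVITYREDACTED
// (or admin); diagnostics-request endpoints instead use
// requireViewActivityAndNoViewActivityRedactedPermission.
func (s *statusServer) listActivitySketch(
	ctx context.Context, req *serverpb.ListContentionEventsRequest,
) (*serverpb.ListContentionEventsResponse, error) {
	// The checkers no longer return the username, so callers branch only
	// on the error.
	if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil {
		return nil, err
	}
	// ... fan out to nodes and assemble the response as the real endpoints do ...
	return &serverpb.ListContentionEventsResponse{}, nil
}
```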
diff --git a/pkg/server/admin_test.go b/pkg/server/admin_test.go index fb37402aeb55..e3b329df7afd 100644 --- a/pkg/server/admin_test.go +++ b/pkg/server/admin_test.go @@ -1074,10 +1074,10 @@ func TestAdminAPISettings(t *testing.T) { // Any bool that defaults to true will work here. const settingKey = "sql.metrics.statement_details.enabled" st := s.ClusterSettings() - allKeys := settings.Keys() + allKeys := settings.Keys(settings.ForSystemTenant) checkSetting := func(t *testing.T, k string, v serverpb.SettingsResponse_Value) { - ref, ok := settings.Lookup(k, settings.LookupForReporting) + ref, ok := settings.Lookup(k, settings.LookupForReporting, settings.ForSystemTenant) if !ok { t.Fatalf("%s: not found after initial lookup", k) } diff --git a/pkg/server/authentication_test.go b/pkg/server/authentication_test.go index decf366d005f..39099c77c5ab 100644 --- a/pkg/server/authentication_test.go +++ b/pkg/server/authentication_test.go @@ -201,8 +201,7 @@ func TestVerifyPassword(t *testing.T) { if util.RaceEnabled { // The default bcrypt cost makes this test approximately 30s slower when the // race detector is on. - defer func(prev int) { security.BcryptCost = prev }(security.BcryptCost) - security.BcryptCost = bcrypt.MinCost + security.BcryptCost.Override(ctx, &ts.Cfg.Settings.SV, int64(bcrypt.MinCost)) } //location is used for timezone testing. diff --git a/pkg/server/combined_statement_stats.go b/pkg/server/combined_statement_stats.go index e6425b043a71..88f8b7447eaf 100644 --- a/pkg/server/combined_statement_stats.go +++ b/pkg/server/combined_statement_stats.go @@ -43,7 +43,7 @@ func (s *statusServer) CombinedStatementStats( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -116,12 +116,18 @@ func collectCombinedStatements( transaction_fingerprint_id, app_name, aggregated_ts, - metadata, + jsonb_set( + metadata, + array['query'], + to_jsonb( + prettify_statement(metadata ->> 'query', %d, %d, %d) + ) + ), statistics, sampled_plan, aggregation_interval FROM crdb_internal.statement_statistics - %s`, whereClause) + %s`, tree.ConsoleLineWidth, tree.PrettyAlignAndDeindent, tree.UpperCase, whereClause) const expectedNumDatums = 8 diff --git a/pkg/server/diagnostics/BUILD.bazel b/pkg/server/diagnostics/BUILD.bazel index 5944ec06f7ca..0b66814c6f4b 100644 --- a/pkg/server/diagnostics/BUILD.bazel +++ b/pkg/server/diagnostics/BUILD.bazel @@ -56,6 +56,7 @@ go_test( "update_checker_test.go", ], embed = [":diagnostics"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/build", diff --git a/pkg/server/diagnostics/reporter.go b/pkg/server/diagnostics/reporter.go index 0e7b3965c58d..50646a48ecd3 100644 --- a/pkg/server/diagnostics/reporter.go +++ b/pkg/server/diagnostics/reporter.go @@ -209,7 +209,9 @@ func (r *Reporter) CreateReport( for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { row := it.Cur() name := string(tree.MustBeDString(row[0])) - info.AlteredSettings[name] = settings.RedactedValue(name, &r.Settings.SV) + info.AlteredSettings[name] = settings.RedactedValue( + name, &r.Settings.SV, r.TenantID == roachpb.SystemTenantID, + ) } if err != nil { // No need to clear AlteredSettings map since we only make best diff --git a/pkg/server/index_usage_stats.go b/pkg/server/index_usage_stats.go index 1c638930f163..12332e6eae7c 100644 --- a/pkg/server/index_usage_stats.go +++ 
b/pkg/server/index_usage_stats.go @@ -40,7 +40,7 @@ func (s *statusServer) IndexUsageStatistics( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -194,7 +194,7 @@ func (s *statusServer) TableIndexStats( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } return getTableIndexUsageStats(ctx, req, s.sqlServer.pgServer.SQLServer.GetLocalIndexStatistics(), @@ -223,14 +223,13 @@ func getTableIndexUsageStats( q := makeSQLQuery() // TODO(#72930): Implement virtual indexes on index_usages_statistics and table_indexes - q.Append(` - SELECT + q.Append(`SELECT ti.index_id, ti.index_name, ti.index_type, total_reads, last_read, - indexdef + prettify_statement(indexdef, $, $, $) FROM crdb_internal.index_usage_statistics AS us JOIN crdb_internal.table_indexes AS ti ON us.index_id = ti.index_id AND us.table_id = ti.descriptor_id @@ -238,6 +237,9 @@ func getTableIndexUsageStats( JOIN pg_catalog.pg_indexes AS pgidxs ON pgidxs.crdb_oid = indexrelid AND indexname = ti.index_name WHERE ti.descriptor_id = $::REGCLASS`, + tree.ConsoleLineWidth, + tree.PrettyAlignAndDeindent, + tree.UpperCase, tableID, ) diff --git a/pkg/server/node.go b/pkg/server/node.go index 592ccee94208..5e0676d6351b 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -1375,6 +1375,13 @@ func (n *Node) GossipSubscription( } } +// TenantSettings implements the roachpb.InternalServer interface. +func (n *Node) TenantSettings( + args *roachpb.TenantSettingsRequest, stream roachpb.Internal_TenantSettingsServer, +) error { + return errors.AssertionFailedf("not implemented") +} + // Join implements the roachpb.InternalServer service. This is the // "connectivity" API; individual CRDB servers are passed in a --join list and // the join targets are addressed through this API. 
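The statement-formatting changes above (combined_statement_stats.go and index_usage_stats.go) move pretty-printing of stored statement text server-side via the prettify_statement builtin. A condensed sketch of the shared construction, using only identifiers that appear in this patch; the exact query text differs per endpoint, and this standalone helper is illustrative, not code from the change.

```go
package example

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// prettyStatementQuery sketches the query-construction pattern used by the
// two status endpoints: the stored statement is rewritten in place with the
// console's line width, alignment, and case settings before being returned.
func prettyStatementQuery(whereClause string) string {
	return fmt.Sprintf(`
SELECT jsonb_set(
         metadata,
         array['query'],
         to_jsonb(prettify_statement(metadata ->> 'query', %d, %d, %d))
       )
  FROM crdb_internal.statement_statistics
  %s`,
		tree.ConsoleLineWidth, tree.PrettyAlignAndDeindent, tree.UpperCase,
		whereClause)
}
```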
diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index 15771b9f7ffd..38ccd9ad15c6 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -63,6 +63,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/colexec" + "github.com/cockroachdb/cockroach/pkg/sql/commenter" "github.com/cockroachdb/cockroach/pkg/sql/contention" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -572,6 +573,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { ExternalStorage: cfg.externalStorage, ExternalStorageFromURI: cfg.externalStorageFromURI, + DistSender: cfg.distSender, RangeCache: cfg.distSender.RangeDescriptorCache(), SQLSQLResponseAdmissionQ: cfg.sqlSQLResponseAdmissionQ, CollectionFactory: collectionFactory, @@ -808,6 +810,9 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { sql.ValidateInvertedIndexes, sql.NewFakeSessionData, ) + execCfg.CommentUpdaterFactory = commenter.NewCommentUpdaterFactory( + ieFactory, + sql.MakeConstraintOidBuilder) execCfg.InternalExecutorFactory = ieFactory distSQLServer.ServerConfig.ProtectedTimestampProvider = execCfg.ProtectedTimestampProvider @@ -902,7 +907,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { spanConfigKnobs, ) - execCfg.SpanConfigReconciliationJobDeps = spanConfig.manager + execCfg.SpanConfigReconciler = spanConfigReconciler } execCfg.SpanConfigKVAccessor = cfg.sqlServerOptionalKVArgs.spanConfigKVAccessor diff --git a/pkg/server/settingswatcher/BUILD.bazel b/pkg/server/settingswatcher/BUILD.bazel index 65e71063bcea..84e789570626 100644 --- a/pkg/server/settingswatcher/BUILD.bazel +++ b/pkg/server/settingswatcher/BUILD.bazel @@ -17,13 +17,11 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql/catalog", - "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", - "//pkg/sql/row", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util/encoding", "//pkg/util/grpcutil", "//pkg/util/hlc", "//pkg/util/log", diff --git a/pkg/server/settingswatcher/row_decoder.go b/pkg/server/settingswatcher/row_decoder.go index 427932291bc8..fdb9406044a8 100644 --- a/pkg/server/settingswatcher/row_decoder.go +++ b/pkg/server/settingswatcher/row_decoder.go @@ -14,21 +14,20 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) // RowDecoder decodes rows from the settings table. 
type RowDecoder struct { - codec keys.SQLCodec - alloc rowenc.DatumAlloc - colIdxMap catalog.TableColMap + codec keys.SQLCodec + alloc tree.DatumAlloc + columns []catalog.Column + decoder valueside.Decoder } // RawValue contains a raw-value / value-type pair, corresponding to the value @@ -40,11 +39,11 @@ type RawValue struct { // MakeRowDecoder makes a new RowDecoder for the settings table. func MakeRowDecoder(codec keys.SQLCodec) RowDecoder { + columns := systemschema.SettingsTable.PublicColumns() return RowDecoder{ - codec: codec, - colIdxMap: row.ColIDtoRowIndexFromCols( - systemschema.SettingsTable.PublicColumns(), - ), + codec: codec, + columns: columns, + decoder: valueside.MakeDecoder(columns), } } @@ -54,10 +53,9 @@ func MakeRowDecoder(codec keys.SQLCodec) RowDecoder { func (d *RowDecoder) DecodeRow( kv roachpb.KeyValue, ) (setting string, val RawValue, tombstone bool, _ error) { - tbl := systemschema.SettingsTable // First we need to decode the setting name field from the index key. { - types := []*types.T{tbl.PublicColumns()[0].GetType()} + types := []*types.T{d.columns[0].GetType()} nameRow := make([]rowenc.EncDatum, 1) _, matches, _, err := rowenc.DecodeIndexKey(d.codec, types, nameRow, nil, kv.Key) if err != nil { @@ -75,43 +73,26 @@ func (d *RowDecoder) DecodeRow( return setting, RawValue{}, true, nil } - // The rest of the columns are stored as a family, packed with diff-encoded - // column IDs followed by their values. - { - // column valueType can be null (missing) so we default it to "s". + // The rest of the columns are stored as a family. + bytes, err := kv.Value.GetTuple() + if err != nil { + return "", RawValue{}, false, err + } + + datums, err := d.decoder.Decode(&d.alloc, bytes) + if err != nil { + return "", RawValue{}, false, err + } + + if value := datums[1]; value != tree.DNull { + val.Value = string(tree.MustBeDString(value)) + } + if typ := datums[3]; typ != tree.DNull { + val.Type = string(tree.MustBeDString(typ)) + } else { + // Column valueType is missing; default it to "s". val.Type = "s" - bytes, err := kv.Value.GetTuple() - if err != nil { - return "", RawValue{}, false, err - } - var colIDDiff uint32 - var lastColID descpb.ColumnID - var res tree.Datum - for len(bytes) > 0 { - _, _, colIDDiff, _, err = encoding.DecodeValueTag(bytes) - if err != nil { - return "", RawValue{}, false, err - } - colID := lastColID + descpb.ColumnID(colIDDiff) - lastColID = colID - if idx, ok := d.colIdxMap.Get(colID); ok { - res, bytes, err = rowenc.DecodeTableValue(&d.alloc, tbl.PublicColumns()[idx].GetType(), bytes) - if err != nil { - return "", RawValue{}, false, err - } - switch colID { - case tbl.PublicColumns()[1].GetID(): // value - val.Value = string(tree.MustBeDString(res)) - case tbl.PublicColumns()[3].GetID(): // valueType - val.Type = string(tree.MustBeDString(res)) - case tbl.PublicColumns()[2].GetID(): // lastUpdated - // TODO(dt): we could decode just the len and then seek `bytes` past - // it, without allocating/decoding the unused timestamp. 
- default: - return "", RawValue{}, false, errors.Errorf("unknown column: %v", colID) - } - } - } } + return setting, val, false, nil } diff --git a/pkg/server/settingswatcher/settings_watcher.go b/pkg/server/settingswatcher/settings_watcher.go index 5a24b40b2252..b0602d1b12b2 100644 --- a/pkg/server/settingswatcher/settings_watcher.go +++ b/pkg/server/settingswatcher/settings_watcher.go @@ -128,7 +128,7 @@ func (s *SettingsWatcher) Start(ctx context.Context) error { rf, err := s.f.RangeFeed(ctx, "settings", []roachpb.Span{settingsTableSpan}, now, func( ctx context.Context, kv *roachpb.RangeFeedValue, ) { - setting, val, tombstone, err := s.dec.DecodeRow(roachpb.KeyValue{ + name, val, tombstone, err := s.dec.DecodeRow(roachpb.KeyValue{ Key: kv.Key, Value: kv.Value, }) @@ -136,19 +136,32 @@ func (s *SettingsWatcher) Start(ctx context.Context) error { log.Warningf(ctx, "failed to decode settings row %v: %v", kv.Key, err) return } + + if !s.codec.ForSystemTenant() { + setting, ok := settings.Lookup(name, settings.LookupForLocalAccess, s.codec.ForSystemTenant()) + if !ok { + log.Warningf(ctx, "unknown setting %s, skipping update", log.Safe(name)) + return + } + if setting.Class() != settings.TenantWritable { + log.Warningf(ctx, "ignoring read-only setting %s", log.Safe(name)) + return + } + } + s.mu.Lock() defer s.mu.Unlock() - _, hasOverride := s.mu.overrides[setting] + _, hasOverride := s.mu.overrides[name] if tombstone { // This event corresponds to a deletion. - delete(s.mu.values, setting) + delete(s.mu.values, name) if !hasOverride { - s.setDefault(ctx, u, setting) + s.setDefault(ctx, u, name) } } else { - s.mu.values[setting] = val + s.mu.values[name] = val if !hasOverride { - s.set(ctx, u, setting, val) + s.set(ctx, u, name, val) } } }, rangefeed.WithInitialScan(func(ctx context.Context) { @@ -218,7 +231,7 @@ func (s *SettingsWatcher) set(ctx context.Context, u settings.Updater, key strin // setDefault sets a setting to its default value. func (s *SettingsWatcher) setDefault(ctx context.Context, u settings.Updater, key string) { - setting, ok := settings.Lookup(key, settings.LookupForLocalAccess) + setting, ok := settings.Lookup(key, settings.LookupForLocalAccess, s.codec.ForSystemTenant()) if !ok { log.Warningf(ctx, "failed to find setting %s, skipping update", log.Safe(key)) return diff --git a/pkg/server/settingswatcher/settings_watcher_external_test.go b/pkg/server/settingswatcher/settings_watcher_external_test.go index 7405f453d401..23752cfb03a8 100644 --- a/pkg/server/settingswatcher/settings_watcher_external_test.go +++ b/pkg/server/settingswatcher/settings_watcher_external_test.go @@ -52,6 +52,9 @@ func TestSettingWatcherOnTenant(t *testing.T) { "kv.queue.process.guaranteed_time_budget": {"17s", "20s"}, "sql.txn_stats.sample_rate": {.23, .55}, "cluster.organization": {"foobar", "bazbax"}, + // Include a system-only setting to verify that we don't try to change its + // value (which would cause a panic in test builds). 
+ "kv.snapshot_rebalance.max_rate": {1024, 2048}, } fakeTenant := roachpb.MakeTenantID(2) systemTable := keys.SystemSQLCodec.TablePrefix(keys.SettingsTableID) @@ -73,9 +76,12 @@ func TestSettingWatcherOnTenant(t *testing.T) { } } checkSettingsValuesMatch := func(a, b *cluster.Settings) error { - for _, k := range settings.Keys() { - s, ok := settings.Lookup(k, settings.LookupForLocalAccess) + for _, k := range settings.Keys(false /* forSystemTenant */) { + s, ok := settings.Lookup(k, settings.LookupForLocalAccess, false /* forSystemTenant */) require.True(t, ok) + if s.Class() == settings.SystemOnly { + continue + } if av, bv := s.String(&a.SV), s.String(&b.SV); av != bv { return errors.Errorf("values do not match for %s: %s != %s", k, av, bv) } @@ -87,18 +93,19 @@ func TestSettingWatcherOnTenant(t *testing.T) { } copySettingsFromSystemToFakeTenant() s0 := tc.Server(0) - fakeSettings := cluster.MakeTestingClusterSettings() - sw := settingswatcher.New(s0.Clock(), fakeCodec, fakeSettings, + tenantSettings := cluster.MakeTestingClusterSettings() + tenantSettings.SV.SetNonSystemTenant() + sw := settingswatcher.New(s0.Clock(), fakeCodec, tenantSettings, s0.ExecutorConfig().(sql.ExecutorConfig).RangeFeedFactory, tc.Stopper()) require.NoError(t, sw.Start(ctx)) - require.NoError(t, checkSettingsValuesMatch(s0.ClusterSettings(), fakeSettings)) + require.NoError(t, checkSettingsValuesMatch(s0.ClusterSettings(), tenantSettings)) for k, v := range toSet { tdb.Exec(t, "SET CLUSTER SETTING "+k+" = $1", v[1]) } copySettingsFromSystemToFakeTenant() testutils.SucceedsSoon(t, func() error { - return checkSettingsValuesMatch(s0.ClusterSettings(), fakeSettings) + return checkSettingsValuesMatch(s0.ClusterSettings(), tenantSettings) }) } @@ -133,14 +140,14 @@ func TestSettingsWatcherWithOverrides(t *testing.T) { expect := func(setting, value string) { t.Helper() - s, ok := settings.Lookup(setting, settings.LookupForLocalAccess) + s, ok := settings.Lookup(setting, settings.LookupForLocalAccess, settings.ForSystemTenant) require.True(t, ok) require.Equal(t, value, s.String(&st.SV)) } expectSoon := func(setting, value string) { t.Helper() - s, ok := settings.Lookup(setting, settings.LookupForLocalAccess) + s, ok := settings.Lookup(setting, settings.LookupForLocalAccess, settings.ForSystemTenant) require.True(t, ok) testutils.SucceedsSoon(t, func() error { if actual := s.String(&st.SV); actual != value { @@ -189,7 +196,7 @@ func TestSettingsWatcherWithOverrides(t *testing.T) { expectSoon("i1", "10") // Verify that version cannot be overridden. 
- version, ok := settings.Lookup("version", settings.LookupForLocalAccess) + version, ok := settings.Lookup("version", settings.LookupForLocalAccess, settings.ForSystemTenant) require.True(t, ok) versionValue := version.String(&st.SV) diff --git a/pkg/server/statement_diagnostics_requests.go b/pkg/server/statement_diagnostics_requests.go index 9d82ef69ccc8..58443d31f8c7 100644 --- a/pkg/server/statement_diagnostics_requests.go +++ b/pkg/server/statement_diagnostics_requests.go @@ -73,7 +73,7 @@ func (s *statusServer) CreateStatementDiagnosticsReport( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityAndNoViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -101,7 +101,7 @@ func (s *statusServer) CancelStatementDiagnosticsReport( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityAndNoViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -125,7 +125,7 @@ func (s *statusServer) StatementDiagnosticsRequests( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityAndNoViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -217,7 +217,7 @@ func (s *statusServer) StatementDiagnostics( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityAndNoViewActivityRedactedPermission(ctx); err != nil { return nil, err } diff --git a/pkg/server/statements.go b/pkg/server/statements.go index b153c5837aaf..e245026871a8 100644 --- a/pkg/server/statements.go +++ b/pkg/server/statements.go @@ -36,7 +36,7 @@ func (s *statusServer) Statements( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - if _, err := s.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } diff --git a/pkg/server/status.go b/pkg/server/status.go index 61af4c8dc32e..f501b97b3eff 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -158,17 +158,13 @@ func (b *baseStatusServer) getLocalSessions( return nil, err } - hasViewActivity, err := b.privilegeChecker.hasRoleOption(ctx, sessionUser, roleoption.VIEWACTIVITY) - if err != nil { - return nil, err - } - reqUsername, err := security.MakeSQLUsernameFromPreNormalizedStringChecked(req.Username) if err != nil { return nil, err } - if !isAdmin && !hasViewActivity { + errViewActivity := b.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx) + if !isAdmin && errViewActivity != nil { // For non-superusers, requests with an empty username is // implicitly a request for the client's own sessions. if reqUsername.Undefined() { @@ -299,29 +295,6 @@ func (b *baseStatusServer) checkCancelPrivilege( return nil } -// hasViewActivityPermissions checks whether the session user has permissions to -// view the activity on the server (which is the case when it is a superuser or -// has VIEWACTIVITY permission) and returns an error if not. 
-func (b *baseStatusServer) hasViewActivityPermissions(ctx context.Context) error { - sessionUser, isAdmin, err := b.privilegeChecker.getUserAndRole(ctx) - if err != nil { - return err - } - hasViewActivity, err := b.privilegeChecker.hasRoleOption(ctx, sessionUser, roleoption.VIEWACTIVITY) - if err != nil { - return err - } - if !isAdmin && !hasViewActivity { - // Only superusers and users with VIEWACTIVITY permission are allowed - // to view the activity on the server. - return status.Errorf( - codes.PermissionDenied, - "client user %q does not have permission to view the activity", - sessionUser) - } - return nil -} - // ListLocalContentionEvents returns a list of contention events on this node. func (b *baseStatusServer) ListLocalContentionEvents( ctx context.Context, _ *serverpb.ListContentionEventsRequest, @@ -329,7 +302,7 @@ func (b *baseStatusServer) ListLocalContentionEvents( ctx = propagateGatewayMetadata(ctx) ctx = b.AnnotateCtx(ctx) - if err := b.hasViewActivityPermissions(ctx); err != nil { + if err := b.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -344,7 +317,7 @@ func (b *baseStatusServer) ListLocalDistSQLFlows( ctx = propagateGatewayMetadata(ctx) ctx = b.AnnotateCtx(ctx) - if err := b.hasViewActivityPermissions(ctx); err != nil { + if err := b.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -2456,7 +2429,7 @@ func (s *statusServer) ListContentionEvents( ctx = s.AnnotateCtx(ctx) // Check permissions early to avoid fan-out to all nodes. - if err := s.hasViewActivityPermissions(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -2501,7 +2474,7 @@ func (s *statusServer) ListDistSQLFlows( ctx = s.AnnotateCtx(ctx) // Check permissions early to avoid fan-out to all nodes. - if err := s.hasViewActivityPermissions(ctx); err != nil { + if err := s.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } diff --git a/pkg/server/status_test.go b/pkg/server/status_test.go index 7adb33721657..b03b820d9f7a 100644 --- a/pkg/server/status_test.go +++ b/pkg/server/status_test.go @@ -90,6 +90,15 @@ func getStatusJSONProtoWithAdminOption( return serverutils.GetJSONProtoWithAdminOption(ts, statusPrefix+path, response, isAdmin) } +func postStatusJSONProtoWithAdminOption( + ts serverutils.TestServerInterface, + path string, + request, response protoutil.Message, + isAdmin bool, +) error { + return serverutils.PostJSONProtoWithAdminOption(ts, statusPrefix+path, request, response, isAdmin) +} + // TestStatusJson verifies that status endpoints return expected Json results. // The content type of the responses is always httputil.JSONContentType. func TestStatusJson(t *testing.T) { @@ -1843,9 +1852,6 @@ func TestStatusAPIStatements(t *testing.T) { t.Fatalf("expected privilege error, got %v", err) } - // Grant VIEWACTIVITY. - thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) - testPath := func(path string, expectedStmts []string) { // Hit query endpoint. if err := getStatusJSONProtoWithAdminOption(firstServerProto, path, &resp, false); err != nil { @@ -1894,7 +1900,20 @@ func TestStatusAPIStatements(t *testing.T) { expectedStatements = append(expectedStatements, expectedStmt) } - // Test no params + // Grant VIEWACTIVITY. 
+ thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) + + // Test no params. + testPath("statements", expectedStatements) + // Test combined=true forwards to CombinedStatements + testPath(fmt.Sprintf("statements?combined=true&start=%d", aggregatedTs+60), nil) + + // Remove VIEWACTIVITY so we can test with just the VIEWACTIVITYREDACTED role. + thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s NOVIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) + // Grant VIEWACTIVITYREDACTED. + thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITYREDACTED", authenticatedUserNameNoAdmin().Normalized())) + + // Test no params. testPath("statements", expectedStatements) // Test combined=true forwards to CombinedStatements testPath(fmt.Sprintf("statements?combined=true&start=%d", aggregatedTs+60), nil) @@ -1922,17 +1941,33 @@ func TestStatusAPICombinedStatements(t *testing.T) { thirdServerSQL := sqlutils.MakeSQLRunner(testCluster.ServerConn(2)) statements := []struct { - stmt string - fingerprinted string + stmt string + formattedStmt string + fingerprint string + formattedFingerprint string }{ - {stmt: `CREATE DATABASE roachblog`}, - {stmt: `SET database = roachblog`}, - {stmt: `CREATE TABLE posts (id INT8 PRIMARY KEY, body STRING)`}, { - stmt: `INSERT INTO posts VALUES (1, 'foo')`, - fingerprinted: `INSERT INTO posts VALUES (_, '_')`, + stmt: `CREATE DATABASE roachblog`, + formattedStmt: "CREATE DATABASE roachblog\n", + }, + { + stmt: `SET database = roachblog`, + formattedStmt: "SET database = roachblog\n", + }, + { + stmt: `CREATE TABLE posts (id INT8 PRIMARY KEY, body STRING)`, + formattedStmt: "CREATE TABLE posts (id INT8 PRIMARY KEY, body STRING)\n", + }, + { + stmt: `INSERT INTO posts VALUES (1, 'foo')`, + formattedStmt: "INSERT INTO posts VALUES (1, 'foo')\n", + fingerprint: `INSERT INTO posts VALUES (_, '_')`, + formattedFingerprint: "INSERT INTO posts VALUES (_, '_')\n", + }, + { + stmt: `SELECT * FROM posts`, + formattedStmt: "SELECT * FROM posts\n", }, - {stmt: `SELECT * FROM posts`}, } for _, stmt := range statements { @@ -1946,9 +1981,6 @@ func TestStatusAPICombinedStatements(t *testing.T) { t.Fatalf("expected privilege error, got %v", err) } - // Grant VIEWACTIVITY. - thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) - testPath := func(path string, expectedStmts []string) { // Hit query endpoint. if err := getStatusJSONProtoWithAdminOption(firstServerProto, path, &resp, false); err != nil { @@ -1992,14 +2024,17 @@ func TestStatusAPICombinedStatements(t *testing.T) { var expectedStatements []string for _, stmt := range statements { - var expectedStmt = stmt.stmt - if stmt.fingerprinted != "" { - expectedStmt = stmt.fingerprinted + var expectedStmt = stmt.formattedStmt + if stmt.fingerprint != "" { + expectedStmt = stmt.formattedFingerprint } expectedStatements = append(expectedStatements, expectedStmt) } - // Test with no query params + // Grant VIEWACTIVITY. + thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) + + // Test with no query params. 
testPath("combinedstmts", expectedStatements) oneMinAfterAggregatedTs := aggregatedTs + 60 @@ -2009,6 +2044,20 @@ func TestStatusAPICombinedStatements(t *testing.T) { testPath(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs), expectedStatements) // Test with start = 1 min after aggregatedTs; should give no results testPath(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil) + + // Remove VIEWACTIVITY so we can test with just the VIEWACTIVITYREDACTED role. + thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s NOVIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) + // Grant VIEWACTIVITYREDACTED. + thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITYREDACTED", authenticatedUserNameNoAdmin().Normalized())) + + // Test with no query params. + testPath("combinedstmts", expectedStatements) + // Test with end = 1 min after aggregatedTs; should give the same results as get all. + testPath(fmt.Sprintf("combinedstmts?end=%d", oneMinAfterAggregatedTs), expectedStatements) + // Test with start = 1 hour before aggregatedTs end = 1 min after aggregatedTs; should give same results as get all. + testPath(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs), expectedStatements) + // Test with start = 1 min after aggregatedTs; should give no results + testPath(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil) } func TestListSessionsSecurity(t *testing.T) { @@ -2097,7 +2146,7 @@ func TestListActivitySecurity(t *testing.T) { ts := s.(*TestServer) defer ts.Stopper().Stop(ctx) - expectedErrNoPermission := "does not have permission to view the activity" + expectedErrNoPermission := "this operation requires the VIEWACTIVITY or VIEWACTIVITYREDACTED role options" contentionMsg := &serverpb.ListContentionEventsResponse{} flowsMsg := &serverpb.ListDistSQLFlowsResponse{} getErrors := func(msg protoutil.Message) []serverpb.ListActivityError { @@ -2404,6 +2453,52 @@ func TestCreateStatementDiagnosticsReport(t *testing.T) { } } +func TestCreateStatementDiagnosticsReportWithViewActivityOptions(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(context.Background()) + db := sqlutils.MakeSQLRunner(sqlDB) + + if err := getStatusJSONProtoWithAdminOption(s, "stmtdiagreports", &serverpb.CreateStatementDiagnosticsReportRequest{}, false); err != nil { + if !testutils.IsError(err, "status: 403") { + t.Fatalf("expected privilege error, got %v", err) + } + } + + // Grant VIEWACTIVITY and all test should work. + db.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized())) + req := &serverpb.CreateStatementDiagnosticsReportRequest{ + StatementFingerprint: "INSERT INTO test VALUES (_)", + } + var resp serverpb.CreateStatementDiagnosticsReportResponse + if err := postStatusJSONProtoWithAdminOption(s, "stmtdiagreports", req, &resp, false); err != nil { + t.Fatal(err) + } + var respGet serverpb.StatementDiagnosticsReportsResponse + if err := getStatusJSONProtoWithAdminOption(s, "stmtdiagreports", &respGet, false); err != nil { + t.Fatal(err) + } + if respGet.Reports[0].StatementFingerprint != req.StatementFingerprint { + t.Fatal("statement diagnostics request was not persisted") + } + + // Grant VIEWACTIVITYREDACTED and all test should get permission errors. 
+ db.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITYREDACTED", authenticatedUserNameNoAdmin().Normalized())) + + if err := postStatusJSONProtoWithAdminOption(s, "stmtdiagreports", req, &resp, false); err != nil { + if !testutils.IsError(err, "status: 403") { + t.Fatalf("expected privilege error, got %v", err) + } + } + if err := getStatusJSONProtoWithAdminOption(s, "stmtdiagreports", &respGet, false); err != nil { + if !testutils.IsError(err, "status: 403") { + t.Fatalf("expected privilege error, got %v", err) + } + } +} + func TestStatementDiagnosticsCompleted(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) diff --git a/pkg/server/tenant_status.go b/pkg/server/tenant_status.go index 77f71b9b8b52..6da680e4aa26 100644 --- a/pkg/server/tenant_status.go +++ b/pkg/server/tenant_status.go @@ -115,7 +115,7 @@ func (t *tenantStatusServer) ListSessions( ctx = propagateGatewayMetadata(ctx) ctx = t.AnnotateCtx(ctx) - if _, err := t.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } if t.sqlServer.SQLInstanceID() == 0 { @@ -307,7 +307,7 @@ func (t *tenantStatusServer) ListContentionEvents( ctx = t.AnnotateCtx(ctx) // Check permissions early to avoid fan-out to all nodes. - if err := t.hasViewActivityPermissions(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -454,7 +454,7 @@ func (t *tenantStatusServer) CombinedStatementStats( ctx = propagateGatewayMetadata(ctx) ctx = t.AnnotateCtx(ctx) - if _, err := t.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -488,7 +488,7 @@ func (t *tenantStatusServer) Statements( ctx = propagateGatewayMetadata(ctx) ctx = t.AnnotateCtx(ctx) - if _, err := t.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -730,7 +730,7 @@ func (t *tenantStatusServer) IndexUsageStatistics( ctx = propagateGatewayMetadata(ctx) ctx = t.AnnotateCtx(ctx) - if _, err := t.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } @@ -873,7 +873,7 @@ func (t *tenantStatusServer) TableIndexStats( ctx = propagateGatewayMetadata(ctx) ctx = t.AnnotateCtx(ctx) - if _, err := t.privilegeChecker.requireViewActivityPermission(ctx); err != nil { + if err := t.privilegeChecker.requireViewActivityOrViewActivityRedactedPermission(ctx); err != nil { return nil, err } diff --git a/pkg/settings/BUILD.bazel b/pkg/settings/BUILD.bazel index 513fc9a378bc..7f52aef43c52 100644 --- a/pkg/settings/BUILD.bazel +++ b/pkg/settings/BUILD.bazel @@ -22,6 +22,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/settings", visibility = ["//visibility:public"], deps = [ + "//pkg/util/buildutil", "//pkg/util/humanizeutil", "//pkg/util/syncutil", "@com_github_cockroachdb_errors//:errors", @@ -35,6 +36,7 @@ go_test( deps = [ ":settings", "//pkg/testutils", + "//pkg/testutils/skip", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/pkg/settings/bool.go b/pkg/settings/bool.go index 
312c27cb6955..e983e46c4a66 100644 --- a/pkg/settings/bool.go +++ b/pkg/settings/bool.go @@ -24,7 +24,7 @@ var _ internalSetting = &BoolSetting{} // Get retrieves the bool value in the setting. func (b *BoolSetting) Get(sv *Values) bool { - return sv.getInt64(b.slotIdx) != 0 + return sv.getInt64(b.slot) != 0 } func (b *BoolSetting) String(sv *Values) string { @@ -65,7 +65,7 @@ func (b *BoolSetting) Override(ctx context.Context, sv *Values, v bool) { if v { vInt = 1 } - sv.setDefaultOverrideInt64(b.slotIdx, vInt) + sv.setDefaultOverrideInt64(b.slot, vInt) } func (b *BoolSetting) set(ctx context.Context, sv *Values, v bool) { @@ -73,12 +73,12 @@ func (b *BoolSetting) set(ctx context.Context, sv *Values, v bool) { if v { vInt = 1 } - sv.setInt64(ctx, b.slotIdx, vInt) + sv.setInt64(ctx, b.slot, vInt) } func (b *BoolSetting) setToDefault(ctx context.Context, sv *Values) { // See if the default value was overridden. - ok, val, _ := sv.getDefaultOverride(b.slotIdx) + ok, val, _ := sv.getDefaultOverride(b.slot) if ok { b.set(ctx, sv, val > 0) return diff --git a/pkg/settings/common.go b/pkg/settings/common.go index 40c835cca61c..aa5acf649f66 100644 --- a/pkg/settings/common.go +++ b/pkg/settings/common.go @@ -17,59 +17,60 @@ import ( // common implements basic functionality used by all setting types. type common struct { - key string - description string - class Class - visibility Visibility - // Each setting has a slotIdx which is used as a handle with Values. - slotIdx int + class Class + key string + description string + visibility Visibility + slot slotIdx nonReportable bool retired bool } +// slotIdx is an integer in the range [0, MaxSetting) which is uniquely +// associated with a registered setting. Slot indexes are used as "handles" for +// manipulating the setting values. They are generated sequentially, in the +// order of registration. +type slotIdx int32 + // init must be called to initialize the fields that don't have defaults. 
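The new `slotIdx` type makes the registration-order handle explicit. Below is a minimal, self-contained sketch of that pattern (toy types, not the actual `pkg/settings` code); the zero-based indexing matches the change in this patch, and every name in the sketch is illustrative:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const maxSettings = 8

// slotIdx is a zero-based handle into the values container, assigned
// sequentially in registration order.
type slotIdx int32

type boolSetting struct {
	key  string
	slot slotIdx
}

type values struct {
	intVals [maxSettings]int64
}

func (v *values) setInt64(slot slotIdx, val int64) { atomic.StoreInt64(&v.intVals[slot], val) }
func (v *values) getInt64(slot slotIdx) int64      { return atomic.LoadInt64(&v.intVals[slot]) }

var registry = map[string]*boolSetting{}

// register hands out the next free slot.
func register(key string) *boolSetting {
	s := &boolSetting{key: key, slot: slotIdx(len(registry))}
	registry[key] = s
	return s
}

func main() {
	a := register("feature.enabled") // illustrative key
	b := register("other.enabled")   // illustrative key

	var sv values
	sv.setInt64(a.slot, 1)
	fmt.Println(a.key, sv.getInt64(a.slot) != 0) // feature.enabled true
	fmt.Println(b.key, sv.getInt64(b.slot) != 0) // other.enabled false
}
```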
-func (i *common) init(class Class, slotIdx int, key string, description string) { - i.class = class - if slotIdx < 1 { - panic(fmt.Sprintf("Invalid slot index %d", slotIdx)) +func (c *common) init(class Class, key string, description string, slot slotIdx) { + c.class = class + c.key = key + c.description = description + if slot < 0 { + panic(fmt.Sprintf("Invalid slot index %d", slot)) } - if slotIdx > MaxSettings { + if slot >= MaxSettings { panic("too many settings; increase MaxSettings") } - i.slotIdx = slotIdx - i.key = key - i.description = description -} - -func (i *common) isRetired() bool { - return i.retired + c.slot = slot } -func (i *common) getSlotIdx() int { - return i.slotIdx +func (c common) Class() Class { + return c.class } -func (i common) Key() string { - return i.key +func (c common) Key() string { + return c.key } -func (i common) Description() string { - return i.description +func (c common) Description() string { + return c.description } -func (i common) Class() Class { - return i.class +func (c common) Visibility() Visibility { + return c.visibility } -func (i common) Visibility() Visibility { - return i.visibility +func (c common) isReportable() bool { + return !c.nonReportable } -func (i common) isReportable() bool { - return !i.nonReportable +func (c *common) isRetired() bool { + return c.retired } -func (i *common) ErrorHint() (bool, string) { +func (c *common) ErrorHint() (bool, string) { return false, "" } @@ -83,36 +84,36 @@ func (i *common) ErrorHint() (bool, string) { // // All string settings are also non-reportable by default and must be // opted in to reports manually with SetReportable(true). -func (i *common) SetReportable(reportable bool) { - i.nonReportable = !reportable +func (c *common) SetReportable(reportable bool) { + c.nonReportable = !reportable } // SetVisibility customizes the visibility of a setting. -func (i *common) SetVisibility(v Visibility) { - i.visibility = v +func (c *common) SetVisibility(v Visibility) { + c.visibility = v } // SetRetired marks the setting as obsolete. It also hides // it from the output of SHOW CLUSTER SETTINGS. -func (i *common) SetRetired() { - i.description = "do not use - " + i.description - i.retired = true +func (c *common) SetRetired() { + c.description = "do not use - " + c.description + c.retired = true } // SetOnChange installs a callback to be called when a setting's value changes. // `fn` should avoid doing long-running or blocking work as it is called on the // goroutine which handles all settings updates. -func (i *common) SetOnChange(sv *Values, fn func(ctx context.Context)) { - sv.setOnChange(i.slotIdx, fn) +func (c *common) SetOnChange(sv *Values, fn func(ctx context.Context)) { + sv.setOnChange(c.slot, fn) } type internalSetting interface { NonMaskedSetting - init(class Class, slotIdx int, key string, desc string) + init(class Class, key, description string, slot slotIdx) isRetired() bool setToDefault(ctx context.Context, sv *Values) - getSlotIdx() int + // isReportable indicates whether the value of the setting can be // included in user-facing reports such as that produced by SHOW ALL // CLUSTER SETTINGS. @@ -126,5 +127,5 @@ type internalSetting interface { // numericSetting is used for settings that can be set using an integer value. 
type numericSetting interface { internalSetting - set(ctx context.Context, sv *Values, i int64) error + set(ctx context.Context, sv *Values, value int64) error } diff --git a/pkg/settings/duration.go b/pkg/settings/duration.go index 08bca8d6c9cb..f3c5fa5280f6 100644 --- a/pkg/settings/duration.go +++ b/pkg/settings/duration.go @@ -43,7 +43,7 @@ func (d *DurationSettingWithExplicitUnit) ErrorHint() (bool, string) { // Get retrieves the duration value in the setting. func (d *DurationSetting) Get(sv *Values) time.Duration { - return time.Duration(sv.getInt64(d.slotIdx)) + return time.Duration(sv.getInt64(d.slot)) } func (d *DurationSetting) String(sv *Values) string { @@ -88,21 +88,21 @@ func (d *DurationSetting) Validate(v time.Duration) error { // // For testing usage only. func (d *DurationSetting) Override(ctx context.Context, sv *Values, v time.Duration) { - sv.setInt64(ctx, d.slotIdx, int64(v)) - sv.setDefaultOverrideInt64(d.slotIdx, int64(v)) + sv.setInt64(ctx, d.slot, int64(v)) + sv.setDefaultOverrideInt64(d.slot, int64(v)) } func (d *DurationSetting) set(ctx context.Context, sv *Values, v time.Duration) error { if err := d.Validate(v); err != nil { return err } - sv.setInt64(ctx, d.slotIdx, int64(v)) + sv.setInt64(ctx, d.slot, int64(v)) return nil } func (d *DurationSetting) setToDefault(ctx context.Context, sv *Values) { // See if the default value was overridden. - ok, val, _ := sv.getDefaultOverride(d.slotIdx) + ok, val, _ := sv.getDefaultOverride(d.slot) if ok { // As per the semantics of override, these values don't go through // validation. diff --git a/pkg/settings/float.go b/pkg/settings/float.go index 0a1bc5b841bd..5571440940a9 100644 --- a/pkg/settings/float.go +++ b/pkg/settings/float.go @@ -30,7 +30,7 @@ var _ internalSetting = &FloatSetting{} // Get retrieves the float value in the setting. func (f *FloatSetting) Get(sv *Values) float64 { - return math.Float64frombits(uint64(sv.getInt64(f.slotIdx))) + return math.Float64frombits(uint64(sv.getInt64(f.slot))) } func (f *FloatSetting) String(sv *Values) string { @@ -68,7 +68,7 @@ func (f *FloatSetting) Override(ctx context.Context, sv *Values, v float64) { if err := f.set(ctx, sv, v); err != nil { panic(err) } - sv.setDefaultOverrideInt64(f.slotIdx, int64(math.Float64bits(v))) + sv.setDefaultOverrideInt64(f.slot, int64(math.Float64bits(v))) } // Validate that a value conforms with the validation function. @@ -85,13 +85,13 @@ func (f *FloatSetting) set(ctx context.Context, sv *Values, v float64) error { if err := f.Validate(v); err != nil { return err } - sv.setInt64(ctx, f.slotIdx, int64(math.Float64bits(v))) + sv.setInt64(ctx, f.slot, int64(math.Float64bits(v))) return nil } func (f *FloatSetting) setToDefault(ctx context.Context, sv *Values) { // See if the default value was overridden. - ok, val, _ := sv.getDefaultOverride(f.slotIdx) + ok, val, _ := sv.getDefaultOverride(f.slot) if ok { // As per the semantics of override, these values don't go through // validation. diff --git a/pkg/settings/int.go b/pkg/settings/int.go index 1fd73e71eaa1..21c07cd31eef 100644 --- a/pkg/settings/int.go +++ b/pkg/settings/int.go @@ -29,7 +29,7 @@ var _ numericSetting = &IntSetting{} // Get retrieves the int value in the setting. func (i *IntSetting) Get(sv *Values) int64 { - return sv.container.getInt64(i.slotIdx) + return sv.container.getInt64(i.slot) } func (i *IntSetting) String(sv *Values) string { @@ -74,21 +74,21 @@ func (i *IntSetting) Validate(v int64) error { // // For testing usage only. 
func (i *IntSetting) Override(ctx context.Context, sv *Values, v int64) { - sv.setInt64(ctx, i.slotIdx, v) - sv.setDefaultOverrideInt64(i.slotIdx, v) + sv.setInt64(ctx, i.slot, v) + sv.setDefaultOverrideInt64(i.slot, v) } func (i *IntSetting) set(ctx context.Context, sv *Values, v int64) error { if err := i.Validate(v); err != nil { return err } - sv.setInt64(ctx, i.slotIdx, v) + sv.setInt64(ctx, i.slot, v) return nil } func (i *IntSetting) setToDefault(ctx context.Context, sv *Values) { // See if the default value was overridden. - ok, val, _ := sv.getDefaultOverride(i.slotIdx) + ok, val, _ := sv.getDefaultOverride(i.slot) if ok { // As per the semantics of override, these values don't go through // validation. diff --git a/pkg/settings/registry.go b/pkg/settings/registry.go index 9bef2a05f42b..396b945eb5c4 100644 --- a/pkg/settings/registry.go +++ b/pkg/settings/registry.go @@ -27,6 +27,10 @@ import ( // read concurrently by different callers. var registry = make(map[string]internalSetting) +// slotTable stores the same settings as the registry, but accessible by the +// slot index. +var slotTable [MaxSettings]internalSetting + // TestingSaveRegistry can be used in tests to save/restore the current // contents of the registry. func TestingSaveRegistry() func() { @@ -142,19 +146,23 @@ func register(class Class, key, desc string, s internalSetting) { )) } } + slot := slotIdx(len(registry)) + s.init(class, key, desc, slot) registry[key] = s - slotIdx := len(registry) - s.init(class, slotIdx, key, desc) + slotTable[slot] = s } // NumRegisteredSettings returns the number of registered settings. func NumRegisteredSettings() int { return len(registry) } // Keys returns a sorted string array with all the known keys. -func Keys() (res []string) { +func Keys(forSystemTenant bool) (res []string) { res = make([]string, 0, len(registry)) - for k := range registry { - if registry[k].isRetired() { + for k, v := range registry { + if v.isRetired() { + continue + } + if !forSystemTenant && v.Class() == SystemOnly { continue } res = append(res, k) @@ -166,13 +174,18 @@ func Keys() (res []string) { // Lookup returns a Setting by name along with its description. // For non-reportable setting, it instantiates a MaskedSetting // to masquerade for the underlying setting. -func Lookup(name string, purpose LookupPurpose) (Setting, bool) { - v, ok := registry[name] - var setting Setting = v - if ok && purpose == LookupForReporting && !v.isReportable() { - setting = &MaskedSetting{setting: v} +func Lookup(name string, purpose LookupPurpose, forSystemTenant bool) (Setting, bool) { + s, ok := registry[name] + if !ok { + return nil, false } - return setting, ok + if !forSystemTenant && s.Class() == SystemOnly { + return nil, false + } + if purpose == LookupForReporting && !s.isReportable() { + return &MaskedSetting{setting: s}, true + } + return s, true } // LookupPurpose indicates what is being done with the setting. @@ -188,6 +201,10 @@ const ( LookupForLocalAccess ) +// ForSystemTenant can be passed to Lookup for code that runs only on the system +// tenant. +const ForSystemTenant = true + // ReadableTypes maps our short type identifiers to friendlier names. var ReadableTypes = map[string]string{ "s": "string", @@ -204,8 +221,8 @@ var ReadableTypes = map[string]string{ // RedactedValue returns a string representation of the value for settings // types the are not considered sensitive (numbers, bools, etc) or // for those with values could store sensitive things (i.e. strings). 
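Both `Keys` and `Lookup` now take a `forSystemTenant` flag so that `SystemOnly` settings are invisible to secondary tenants. A self-contained sketch of that filtering logic follows (toy registry, not the real package; `sql.some_knob` is a made-up key, while `spanconfig.store.enabled` is the SystemOnly setting renamed later in this patch):

```go
package main

import "fmt"

type class int

const (
	systemOnly class = iota
	tenantWritable
)

type setting struct {
	key   string
	class class
}

var registry = map[string]setting{
	// spanconfig.store.enabled is SystemOnly; sql.some_knob is a made-up
	// tenant-writable key used only for illustration.
	"spanconfig.store.enabled": {key: "spanconfig.store.enabled", class: systemOnly},
	"sql.some_knob":            {key: "sql.some_knob", class: tenantWritable},
}

// lookup mirrors the new behavior: a SystemOnly setting is simply "not found"
// when the caller is not the system tenant.
func lookup(name string, forSystemTenant bool) (setting, bool) {
	s, ok := registry[name]
	if !ok {
		return setting{}, false
	}
	if !forSystemTenant && s.class == systemOnly {
		return setting{}, false
	}
	return s, true
}

func main() {
	_, ok := lookup("spanconfig.store.enabled", false /* forSystemTenant */)
	fmt.Println(ok) // false: hidden from secondary tenants
	_, ok = lookup("spanconfig.store.enabled", true /* forSystemTenant */)
	fmt.Println(ok) // true
}
```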
-func RedactedValue(name string, values *Values) string { - if setting, ok := Lookup(name, LookupForReporting); ok { +func RedactedValue(name string, values *Values, forSystemTenant bool) string { + if setting, ok := Lookup(name, LookupForReporting, forSystemTenant); ok { return setting.String(values) } return "" diff --git a/pkg/settings/setting.go b/pkg/settings/setting.go index a9b74673e9a9..db8f777c3c20 100644 --- a/pkg/settings/setting.go +++ b/pkg/settings/setting.go @@ -22,6 +22,12 @@ import ( // multiple "instances" (values) for each setting (e.g. for multiple test // servers in the same process). type Setting interface { + // Class returns the scope of the setting in multi-tenant scenarios. + Class() Class + + // Key returns the name of the specific cluster setting. + Key() string + // Typ returns the short (1 char) string denoting the type of setting. Typ() string @@ -30,9 +36,6 @@ type Setting interface { // CLUSTER SETTING `. String(sv *Values) string - // Key returns the name of the specific cluster setting. - Key() string - // Description contains a helpful text explaining what the specific cluster // setting is for. Description() string @@ -41,9 +44,6 @@ type Setting interface { // Reserved settings are still accessible to users, but they don't get listed // out when retrieving all settings. Visibility() Visibility - - // Class returns the scope of the setting in multi-tenant scenarios. - Class() Class } // NonMaskedSetting is the exported interface of non-masked settings. diff --git a/pkg/settings/settings_test.go b/pkg/settings/settings_test.go index c7c9100cd6c2..9e0af4ad256c 100644 --- a/pkg/settings/settings_test.go +++ b/pkg/settings/settings_test.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" @@ -152,11 +153,11 @@ var strFooA = settings.RegisterStringSetting(settings.TenantWritable, "str.foo", var strBarA = settings.RegisterStringSetting(settings.SystemOnly, "str.bar", "desc", "bar") var i1A = settings.RegisterIntSetting(settings.TenantWritable, "i.1", "desc", 0) var i2A = settings.RegisterIntSetting(settings.TenantWritable, "i.2", "desc", 5) -var fA = settings.RegisterFloatSetting(settings.TenantWritable, "f", "desc", 5.4) +var fA = settings.RegisterFloatSetting(settings.TenantReadOnly, "f", "desc", 5.4) var dA = settings.RegisterDurationSetting(settings.TenantWritable, "d", "desc", time.Second) var duA = settings.RegisterPublicDurationSettingWithExplicitUnit(settings.TenantWritable, "d_with_explicit_unit", "desc", time.Second, settings.NonNegativeDuration) var _ = settings.RegisterDurationSetting(settings.TenantWritable, "d_with_maximum", "desc", time.Second, settings.NonNegativeDurationWithMaximum(time.Hour)) -var eA = settings.RegisterEnumSetting(settings.TenantWritable, "e", "desc", "foo", map[int64]string{1: "foo", 2: "bar", 3: "baz"}) +var eA = settings.RegisterEnumSetting(settings.SystemOnly, "e", "desc", "foo", map[int64]string{1: "foo", 2: "bar", 3: "baz"}) var byteSize = settings.RegisterByteSizeSetting(settings.TenantWritable, "zzz", "desc", mb) var mA = func() *settings.VersionSetting { s := settings.MakeVersionSetting(&dummyVersionSettingImpl{}) @@ -346,36 +347,34 @@ func TestCache(t *testing.T) { // doesn't have one and it would crash. 
}) - t.Run("lookup", func(t *testing.T) { - if actual, ok := settings.Lookup("i.1", settings.LookupForLocalAccess); !ok || i1A != actual { - t.Fatalf("expected %v, got %v (exists: %v)", i1A, actual, ok) - } - if actual, ok := settings.Lookup("i.Val", settings.LookupForLocalAccess); !ok || iVal != actual { - t.Fatalf("expected %v, got %v (exists: %v)", iVal, actual, ok) - } - if actual, ok := settings.Lookup("f", settings.LookupForLocalAccess); !ok || fA != actual { - t.Fatalf("expected %v, got %v (exists: %v)", fA, actual, ok) - } - if actual, ok := settings.Lookup("fVal", settings.LookupForLocalAccess); !ok || fVal != actual { - t.Fatalf("expected %v, got %v (exists: %v)", fVal, actual, ok) - } - if actual, ok := settings.Lookup("d", settings.LookupForLocalAccess); !ok || dA != actual { - t.Fatalf("expected %v, got %v (exists: %v)", dA, actual, ok) - } - if actual, ok := settings.Lookup("dVal", settings.LookupForLocalAccess); !ok || dVal != actual { - t.Fatalf("expected %v, got %v (exists: %v)", dVal, actual, ok) - } - if actual, ok := settings.Lookup("e", settings.LookupForLocalAccess); !ok || eA != actual { - t.Fatalf("expected %v, got %v (exists: %v)", eA, actual, ok) - } - if actual, ok := settings.Lookup("v.1", settings.LookupForLocalAccess); !ok || mA != actual { - t.Fatalf("expected %v, got %v (exists: %v)", mA, actual, ok) + t.Run("lookup-system", func(t *testing.T) { + for _, s := range []settings.Setting{i1A, iVal, fA, fVal, dA, dVal, eA, mA, duA} { + result, ok := settings.Lookup(s.Key(), settings.LookupForLocalAccess, settings.ForSystemTenant) + if !ok { + t.Fatalf("lookup(%s) failed", s.Key()) + } + if result != s { + t.Fatalf("expected %v, got %v", s, result) + } } - if actual, ok := settings.Lookup("d_with_explicit_unit", settings.LookupForLocalAccess); !ok || duA != actual { - t.Fatalf("expected %v, got %v (exists: %v)", duA, actual, ok) + }) + t.Run("lookup-tenant", func(t *testing.T) { + for _, s := range []settings.Setting{i1A, fA, dA, duA} { + result, ok := settings.Lookup(s.Key(), settings.LookupForLocalAccess, false /* forSystemTenant */) + if !ok { + t.Fatalf("lookup(%s) failed", s.Key()) + } + if result != s { + t.Fatalf("expected %v, got %v", s, result) + } } - if actual, ok := settings.Lookup("dne", settings.LookupForLocalAccess); ok { - t.Fatalf("expected nothing, got %v", actual) + }) + t.Run("lookup-tenant-fail", func(t *testing.T) { + for _, s := range []settings.Setting{iVal, fVal, dVal, eA, mA} { + _, ok := settings.Lookup(s.Key(), settings.LookupForLocalAccess, false /* forSystemTenant */) + if ok { + t.Fatalf("lookup(%s) should have failed", s.Key()) + } } }) @@ -687,10 +686,14 @@ func TestCache(t *testing.T) { } func TestIsReportable(t *testing.T) { - if v, ok := settings.Lookup("bool.t", settings.LookupForLocalAccess); !ok || !settings.TestingIsReportable(v) { + if v, ok := settings.Lookup( + "bool.t", settings.LookupForLocalAccess, settings.ForSystemTenant, + ); !ok || !settings.TestingIsReportable(v) { t.Errorf("expected 'bool.t' to be marked as isReportable() = true") } - if v, ok := settings.Lookup("sekretz", settings.LookupForLocalAccess); !ok || settings.TestingIsReportable(v) { + if v, ok := settings.Lookup( + "sekretz", settings.LookupForLocalAccess, settings.ForSystemTenant, + ); !ok || settings.TestingIsReportable(v) { t.Errorf("expected 'sekretz' to be marked as isReportable() = false") } } @@ -707,7 +710,7 @@ func TestOnChangeWithMaxSettings(t *testing.T) { sv := &settings.Values{} sv.Init(ctx, settings.TestOpaque) var changes int - s, ok := 
settings.Lookup(maxName, settings.LookupForLocalAccess) + s, ok := settings.Lookup(maxName, settings.LookupForLocalAccess, settings.ForSystemTenant) if !ok { t.Fatalf("expected lookup of %s to succeed", maxName) } @@ -798,6 +801,31 @@ func TestOverride(t *testing.T) { require.Equal(t, 42.0, overrideFloat.Get(sv)) } +func TestSystemOnlyDisallowedOnTenant(t *testing.T) { + skip.UnderNonTestBuild(t) + + ctx := context.Background() + sv := &settings.Values{} + sv.Init(ctx, settings.TestOpaque) + sv.SetNonSystemTenant() + + // Check that we can still read non-SystemOnly settings. + if expected, actual := "", strFooA.Get(sv); expected != actual { + t.Fatalf("expected %v, got %v", expected, actual) + } + + func() { + defer func() { + if r := recover(); r == nil { + t.Error("Get did not panic") + } else if !strings.Contains(fmt.Sprint(r), "attempted to set forbidden setting") { + t.Errorf("received unexpected panic: %v", r) + } + }() + strBarA.Get(sv) + }() +} + func setDummyVersion(dv dummyVersion, vs *settings.VersionSetting, sv *settings.Values) error { // This is a bit round about because the VersionSetting doesn't get updated // through the updater, like most other settings. In order to set it, we set diff --git a/pkg/settings/string.go b/pkg/settings/string.go index 2080f44c0e19..8447c0cef7ee 100644 --- a/pkg/settings/string.go +++ b/pkg/settings/string.go @@ -56,7 +56,7 @@ var _ = (*StringSetting).Default // Get retrieves the string value in the setting. func (s *StringSetting) Get(sv *Values) string { - loaded := sv.getGeneric(s.slotIdx) + loaded := sv.getGeneric(s.slot) if loaded == nil { return "" } @@ -84,7 +84,7 @@ func (s *StringSetting) set(ctx context.Context, sv *Values, v string) error { return err } if s.Get(sv) != v { - sv.setGeneric(ctx, s.slotIdx, v) + sv.setGeneric(ctx, s.slot, v) } return nil } diff --git a/pkg/settings/updater.go b/pkg/settings/updater.go index 12016a41e15c..9499c809ad09 100644 --- a/pkg/settings/updater.go +++ b/pkg/settings/updater.go @@ -136,6 +136,10 @@ func (u updater) Set(ctx context.Context, key, rawValue string, vt string) error // ResetRemaining sets all settings not updated by the updater to their default values. func (u updater) ResetRemaining(ctx context.Context) { for k, v := range registry { + if u.sv.NonSystemTenant() && v.Class() == SystemOnly { + // Don't try to reset system settings on a non-system tenant. + continue + } if _, ok := u.m[k]; !ok { v.setToDefault(ctx, u.sv) } diff --git a/pkg/settings/values.go b/pkg/settings/values.go index cebc61f0570a..dae86127e37e 100644 --- a/pkg/settings/values.go +++ b/pkg/settings/values.go @@ -14,12 +14,14 @@ import ( "context" "sync/atomic" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/syncutil" + "github.com/cockroachdb/errors" ) // MaxSettings is the maximum number of settings that the system supports. // Exported for tests. -const MaxSettings = 512 +const MaxSettings = 511 // Values is a container that stores values for all registered settings. // Each setting is assigned a unique slot (up to MaxSettings). @@ -28,13 +30,15 @@ const MaxSettings = 512 type Values struct { container valuesContainer + nonSystemTenant bool + overridesMu struct { syncutil.Mutex // defaultOverrides maintains the set of overridden default values (see // Override()). defaultOverrides valuesContainer // setOverrides is the list of slots with values in defaultOverrides. 
- setOverrides map[int]struct{} + setOverrides map[slotIdx]struct{} } changeMu struct { @@ -48,17 +52,53 @@ type Values struct { opaque interface{} } +const numSlots = MaxSettings + 1 + type valuesContainer struct { - intVals [MaxSettings]int64 - genericVals [MaxSettings]atomic.Value + intVals [numSlots]int64 + genericVals [numSlots]atomic.Value + + // If forbidden[slot] is true, that setting is not allowed to be used from the + // current context (i.e. it is a SystemOnly setting and the container is for a + // tenant). Reading or writing such a setting causes panics in test builds. + forbidden [numSlots]bool } -func (c *valuesContainer) setGenericVal(slotIdx int, newVal interface{}) { - c.genericVals[slotIdx].Store(newVal) +func (c *valuesContainer) setGenericVal(slot slotIdx, newVal interface{}) { + if !c.checkForbidden(slot) { + return + } + c.genericVals[slot].Store(newVal) } -func (c *valuesContainer) setInt64Val(slotIdx int, newVal int64) bool { - return atomic.SwapInt64(&c.intVals[slotIdx], newVal) != newVal +func (c *valuesContainer) setInt64Val(slot slotIdx, newVal int64) (changed bool) { + if !c.checkForbidden(slot) { + return false + } + return atomic.SwapInt64(&c.intVals[slot], newVal) != newVal +} + +func (c *valuesContainer) getInt64(slot slotIdx) int64 { + c.checkForbidden(slot) + return atomic.LoadInt64(&c.intVals[slot]) +} + +func (c *valuesContainer) getGeneric(slot slotIdx) interface{} { + c.checkForbidden(slot) + return c.genericVals[slot].Load() +} + +// checkForbidden checks if the setting in the given slot is allowed to be used +// from the current context. If not, it panics in test builds and returns false +// in non-test builds. +func (c *valuesContainer) checkForbidden(slot slotIdx) bool { + if c.forbidden[slot] { + if buildutil.CrdbTestBuild { + panic(errors.AssertionFailedf("attempted to set forbidden setting %s", slotTable[slot].Key())) + } + return false + } + return true } type testOpaqueType struct{} @@ -78,49 +118,58 @@ func (sv *Values) Init(ctx context.Context, opaque interface{}) { } } +// SetNonSystemTenant marks this container as pertaining to a non-system tenant, +// after which use of SystemOnly values is disallowed. +func (sv *Values) SetNonSystemTenant() { + sv.nonSystemTenant = true + for slot, setting := range slotTable { + if setting != nil && setting.Class() == SystemOnly { + sv.container.forbidden[slot] = true + } + } +} + +// NonSystemTenant returns true if this container is for a non-system tenant +// (i.e. SetNonSystemTenant() was called). +func (sv *Values) NonSystemTenant() bool { + return sv.nonSystemTenant +} + // Opaque returns the argument passed to Init. 
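`SetNonSystemTenant` marks every `SystemOnly` slot as forbidden, and `checkForbidden` turns any use of such a slot into a panic in test builds (and a silent refusal otherwise). A minimal sketch of that guard, with `crdbTestBuild` standing in for `buildutil.CrdbTestBuild` and toy types throughout:

```go
package main

import "fmt"

const numSlots = 4

// crdbTestBuild stands in for buildutil.CrdbTestBuild.
const crdbTestBuild = true

type container struct {
	intVals   [numSlots]int64
	forbidden [numSlots]bool
}

// checkForbidden mirrors the guard added in this patch: panic in test builds,
// silently refuse the write in production builds.
func (c *container) checkForbidden(slot int) bool {
	if c.forbidden[slot] {
		if crdbTestBuild {
			panic(fmt.Sprintf("attempted to use forbidden setting in slot %d", slot))
		}
		return false
	}
	return true
}

func (c *container) setInt64(slot int, v int64) {
	if !c.checkForbidden(slot) {
		return
	}
	c.intVals[slot] = v
}

func main() {
	var c container
	c.forbidden[2] = true // e.g. a SystemOnly setting on a tenant's container
	c.setInt64(1, 7)      // fine

	defer func() { fmt.Println("recovered:", recover()) }()
	c.setInt64(2, 7) // panics under crdbTestBuild
}
```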
func (sv *Values) Opaque() interface{} { return sv.opaque } -func (sv *Values) settingChanged(ctx context.Context, slotIdx int) { +func (sv *Values) settingChanged(ctx context.Context, slot slotIdx) { sv.changeMu.Lock() - funcs := sv.changeMu.onChange[slotIdx-1] + funcs := sv.changeMu.onChange[slot] sv.changeMu.Unlock() for _, fn := range funcs { fn(ctx) } } -func (c *valuesContainer) getInt64(slotIdx int) int64 { - return atomic.LoadInt64(&c.intVals[slotIdx-1]) -} - -func (c *valuesContainer) getGeneric(slotIdx int) interface{} { - return c.genericVals[slotIdx-1].Load() -} - -func (sv *Values) setInt64(ctx context.Context, slotIdx int, newVal int64) { - if sv.container.setInt64Val(slotIdx-1, newVal) { - sv.settingChanged(ctx, slotIdx) +func (sv *Values) setInt64(ctx context.Context, slot slotIdx, newVal int64) { + if sv.container.setInt64Val(slot, newVal) { + sv.settingChanged(ctx, slot) } } // setDefaultOverrideInt64 overrides the default value for the respective // setting to newVal. -func (sv *Values) setDefaultOverrideInt64(slotIdx int, newVal int64) { +func (sv *Values) setDefaultOverrideInt64(slot slotIdx, newVal int64) { sv.overridesMu.Lock() defer sv.overridesMu.Unlock() - sv.overridesMu.defaultOverrides.setInt64Val(slotIdx-1, newVal) - sv.setDefaultOverrideLocked(slotIdx) + sv.overridesMu.defaultOverrides.setInt64Val(slot, newVal) + sv.setDefaultOverrideLocked(slot) } -// setDefaultOverrideLocked marks slotIdx-1 as having an overridden default value. -func (sv *Values) setDefaultOverrideLocked(slotIdx int) { +// setDefaultOverrideLocked marks the slot as having an overridden default value. +func (sv *Values) setDefaultOverrideLocked(slot slotIdx) { if sv.overridesMu.setOverrides == nil { - sv.overridesMu.setOverrides = make(map[int]struct{}) + sv.overridesMu.setOverrides = make(map[slotIdx]struct{}) } - sv.overridesMu.setOverrides[slotIdx-1] = struct{}{} + sv.overridesMu.setOverrides[slot] = struct{}{} } // getDefaultOverrides checks whether there's a default override for slotIdx-1. @@ -128,36 +177,35 @@ func (sv *Values) setDefaultOverrideLocked(slotIdx int) { // true, the second is the int64 override and the last is a pointer to the // generic value override. Callers are expected to only use the override value // corresponding to their setting type. 
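The override bookkeeping these methods maintain exists so that the test-only `Override` survives a later reset to defaults. A rough, self-contained sketch of that behavior (toy types, no concurrency handling):

```go
package main

import "fmt"

// Override (test-only) both sets the value and records it as the new
// "default", so a later setToDefault restores the overridden value rather
// than the original one.
type intSetting struct {
	defaultVal int64
}

type values struct {
	val             int64
	overrideSet     bool
	defaultOverride int64
}

func (s *intSetting) override(sv *values, v int64) {
	sv.val = v
	sv.defaultOverride = v
	sv.overrideSet = true
}

func (s *intSetting) setToDefault(sv *values) {
	if sv.overrideSet {
		sv.val = sv.defaultOverride
		return
	}
	sv.val = s.defaultVal
}

func main() {
	s := &intSetting{defaultVal: 10}
	sv := &values{val: 10}

	s.override(sv, 42)
	s.setToDefault(sv)
	fmt.Println(sv.val) // 42, not 10: the override sticks across resets
}
```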
-func (sv *Values) getDefaultOverride(slotIdx int) (bool, int64, *atomic.Value) { - slotIdx-- +func (sv *Values) getDefaultOverride(slot slotIdx) (bool, int64, *atomic.Value) { sv.overridesMu.Lock() defer sv.overridesMu.Unlock() - if _, ok := sv.overridesMu.setOverrides[slotIdx]; !ok { + if _, ok := sv.overridesMu.setOverrides[slot]; !ok { return false, 0, nil } return true, - sv.overridesMu.defaultOverrides.intVals[slotIdx], - &sv.overridesMu.defaultOverrides.genericVals[slotIdx] + sv.overridesMu.defaultOverrides.intVals[slot], + &sv.overridesMu.defaultOverrides.genericVals[slot] } -func (sv *Values) setGeneric(ctx context.Context, slotIdx int, newVal interface{}) { - sv.container.setGenericVal(slotIdx-1, newVal) - sv.settingChanged(ctx, slotIdx) +func (sv *Values) setGeneric(ctx context.Context, slot slotIdx, newVal interface{}) { + sv.container.setGenericVal(slot, newVal) + sv.settingChanged(ctx, slot) } -func (sv *Values) getInt64(slotIdx int) int64 { - return sv.container.getInt64(slotIdx) +func (sv *Values) getInt64(slot slotIdx) int64 { + return sv.container.getInt64(slot) } -func (sv *Values) getGeneric(slotIdx int) interface{} { - return sv.container.getGeneric(slotIdx) +func (sv *Values) getGeneric(slot slotIdx) interface{} { + return sv.container.getGeneric(slot) } // setOnChange installs a callback to be called when a setting's value changes. // `fn` should avoid doing long-running or blocking work as it is called on the // goroutine which handles all settings updates. -func (sv *Values) setOnChange(slotIdx int, fn func(ctx context.Context)) { +func (sv *Values) setOnChange(slot slotIdx, fn func(ctx context.Context)) { sv.changeMu.Lock() - sv.changeMu.onChange[slotIdx-1] = append(sv.changeMu.onChange[slotIdx-1], fn) + sv.changeMu.onChange[slot] = append(sv.changeMu.onChange[slot], fn) sv.changeMu.Unlock() } diff --git a/pkg/settings/version.go b/pkg/settings/version.go index 0c02f21ac662..d971e90aafc5 100644 --- a/pkg/settings/version.go +++ b/pkg/settings/version.go @@ -140,19 +140,19 @@ func (v *VersionSetting) EncodedDefault() string { func (v *VersionSetting) Get(sv *Values) string { encV := v.GetInternal(sv) if encV == nil { - panic(fmt.Sprintf("missing value for version setting in slot %d", v.getSlotIdx())) + panic(fmt.Sprintf("missing value for version setting in slot %d", v.slot)) } return string(encV.([]byte)) } // GetInternal returns the setting's current value. func (v *VersionSetting) GetInternal(sv *Values) interface{} { - return sv.getGeneric(v.getSlotIdx()) + return sv.getGeneric(v.slot) } // SetInternal updates the setting's value in the provided Values container. func (v *VersionSetting) SetInternal(ctx context.Context, sv *Values, newVal interface{}) { - sv.setGeneric(ctx, v.getSlotIdx(), newVal) + sv.setGeneric(ctx, v.slot, newVal) } // setToDefault is part of the extendingSetting interface. This is a no-op for diff --git a/pkg/spanconfig/spanconfig.go b/pkg/spanconfig/spanconfig.go index 111956cb98ba..9d6aa5091cfd 100644 --- a/pkg/spanconfig/spanconfig.go +++ b/pkg/spanconfig/spanconfig.go @@ -160,25 +160,24 @@ type Reconciler interface { // timestamp. If it does not find MVCC history going far back enough[1], it // falls back to a scan of all descriptors and zone configs before being // able to do more incremental work. The provided callback is invoked - // with timestamps that can be safely checkpointed. 
A future Reconciliation - // attempt can make use of this timestamp to reduce the amount of necessary - // work (provided the MVCC history is still available). + // whenever incremental progress has been made and a Checkpoint() timestamp + // is available. A future Reconcile() attempt can make use of this timestamp + // to reduce the amount of necessary work (provided the MVCC history is + // still available). // // [1]: It's possible for system.{zones,descriptor} to have been GC-ed away; // think suspended tenants. Reconcile( ctx context.Context, startTS hlc.Timestamp, - callback func(checkpoint hlc.Timestamp) error, + onCheckpoint func() error, ) error -} -// ReconciliationDependencies captures what's needed by the span config -// reconciliation job to perform its task. The job is responsible for -// reconciling a tenant's zone configurations with the clusters span -// configurations. -type ReconciliationDependencies interface { - Reconciler + // Checkpoint returns a timestamp suitable for checkpointing. A future + // Reconcile() attempt can make use of this timestamp to reduce the + // amount of necessary work (provided the MVCC history is + // still available). + Checkpoint() hlc.Timestamp } // Store is a data structure used to store spans and their corresponding diff --git a/pkg/spanconfig/spanconfigjob/job.go b/pkg/spanconfig/spanconfigjob/job.go index 5d73ee97e042..cfadc4d9ef86 100644 --- a/pkg/spanconfig/spanconfigjob/job.go +++ b/pkg/spanconfig/spanconfigjob/job.go @@ -30,7 +30,7 @@ var _ jobs.Resumer = (*resumer)(nil) // Resume implements the jobs.Resumer interface. func (r *resumer) Resume(ctx context.Context, execCtxI interface{}) error { execCtx := execCtxI.(sql.JobExecContext) - rc := execCtx.SpanConfigReconciliationJobDeps() + rc := execCtx.SpanConfigReconciler() // TODO(irfansharif): #73086 bubbles up retryable errors from the // reconciler/underlying watcher in the (very) unlikely event that it's @@ -41,10 +41,10 @@ func (r *resumer) Resume(ctx context.Context, execCtxI interface{}) error { // the job all over again after some time, it's just that the checks for // failed jobs happen infrequently. - if err := rc.Reconcile(ctx, hlc.Timestamp{}, func(checkpoint hlc.Timestamp) error { + if err := rc.Reconcile(ctx, hlc.Timestamp{}, func() error { // TODO(irfansharif): Stash this checkpoint somewhere and use it when // starting back up. 
- _ = checkpoint + _ = rc.Checkpoint() return nil }); err != nil { return err diff --git a/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel b/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel index 2790e0aba9fe..be77fc414a90 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel +++ b/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel @@ -18,13 +18,11 @@ go_library( "//pkg/spanconfig", "//pkg/spanconfig/spanconfigstore", "//pkg/sql/catalog", - "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", - "//pkg/sql/row", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/util/encoding", "//pkg/util/grpcutil", "//pkg/util/hlc", "//pkg/util/log", diff --git a/pkg/spanconfig/spanconfigkvsubscriber/span_config_decoder.go b/pkg/spanconfig/spanconfigkvsubscriber/span_config_decoder.go index 1d244903e8b5..d433ccb3c749 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/span_config_decoder.go +++ b/pkg/spanconfig/spanconfigkvsubscriber/span_config_decoder.go @@ -14,13 +14,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" ) @@ -28,26 +26,26 @@ import ( // spanConfigDecoder decodes rows from system.span_configurations. It's not // safe for concurrent use. type spanConfigDecoder struct { - alloc rowenc.DatumAlloc - colIdxMap catalog.TableColMap + alloc tree.DatumAlloc + columns []catalog.Column + decoder valueside.Decoder } // newSpanConfigDecoder instantiates a spanConfigDecoder. func newSpanConfigDecoder() *spanConfigDecoder { + columns := systemschema.SpanConfigurationsTable.PublicColumns() return &spanConfigDecoder{ - colIdxMap: row.ColIDtoRowIndexFromCols( - systemschema.SpanConfigurationsTable.PublicColumns(), - ), + columns: columns, + decoder: valueside.MakeDecoder(columns), } } // decode a span config entry given a KV from the // system.span_configurations table. func (sd *spanConfigDecoder) decode(kv roachpb.KeyValue) (entry roachpb.SpanConfigEntry, _ error) { - tbl := systemschema.SpanConfigurationsTable // First we need to decode the start_key field from the index key. { - types := []*types.T{tbl.PublicColumns()[0].GetType()} + types := []*types.T{sd.columns[0].GetType()} startKeyRow := make([]rowenc.EncDatum, 1) _, matches, _, err := rowenc.DecodeIndexKey(keys.SystemSQLCodec, types, startKeyRow, nil /* colDirs */, kv.Key) if err != nil { @@ -69,40 +67,22 @@ func (sd *spanConfigDecoder) decode(kv roachpb.KeyValue) (entry roachpb.SpanConf errors.AssertionFailedf("missing value for start key: %s", entry.Span.Key) } - // The remaining columns are stored as a family, packed with diff-encoded - // column IDs followed by their values. 
- { - bytes, err := kv.Value.GetTuple() - if err != nil { - return roachpb.SpanConfigEntry{}, err - } - var colIDDiff uint32 - var lastColID descpb.ColumnID - var res tree.Datum - for len(bytes) > 0 { - _, _, colIDDiff, _, err = encoding.DecodeValueTag(bytes) - if err != nil { - return roachpb.SpanConfigEntry{}, err - } - colID := lastColID + descpb.ColumnID(colIDDiff) - lastColID = colID - if idx, ok := sd.colIdxMap.Get(colID); ok { - res, bytes, err = rowenc.DecodeTableValue(&sd.alloc, tbl.PublicColumns()[idx].GetType(), bytes) - if err != nil { - return roachpb.SpanConfigEntry{}, err - } + // The remaining columns are stored as a family. + bytes, err := kv.Value.GetTuple() + if err != nil { + return roachpb.SpanConfigEntry{}, err + } - switch colID { - case tbl.PublicColumns()[1].GetID(): // end_key - entry.Span.EndKey = []byte(tree.MustBeDBytes(res)) - case tbl.PublicColumns()[2].GetID(): // config - if err := protoutil.Unmarshal([]byte(tree.MustBeDBytes(res)), &entry.Config); err != nil { - return roachpb.SpanConfigEntry{}, err - } - default: - return roachpb.SpanConfigEntry{}, errors.AssertionFailedf("unknown column: %v", colID) - } - } + datums, err := sd.decoder.Decode(&sd.alloc, bytes) + if err != nil { + return roachpb.SpanConfigEntry{}, err + } + if endKey := datums[1]; endKey != tree.DNull { + entry.Span.EndKey = []byte(tree.MustBeDBytes(endKey)) + } + if config := datums[2]; config != tree.DNull { + if err := protoutil.Unmarshal([]byte(tree.MustBeDBytes(config)), &entry.Config); err != nil { + return roachpb.SpanConfigEntry{}, err } } diff --git a/pkg/spanconfig/spanconfigmanager/manager.go b/pkg/spanconfig/spanconfigmanager/manager.go index 6df2708278df..a82bcc466d76 100644 --- a/pkg/spanconfig/spanconfigmanager/manager.go +++ b/pkg/spanconfig/spanconfigmanager/manager.go @@ -31,11 +31,11 @@ import ( // checkReconciliationJobInterval is a cluster setting to control how often we // check if the span config reconciliation job exists. If it's not found, it // will be started. It has no effect unless -// spanconfig.experimental_reconciliation.enabled is configured. For host +// spanconfig.reconciliation_job.enabled is configured. For host // tenants, COCKROACH_EXPERIMENTAL_SPAN_CONFIGS needs to be additionally set. var checkReconciliationJobInterval = settings.RegisterDurationSetting( settings.TenantWritable, - "spanconfig.experimental_reconciliation_job.check_interval", + "spanconfig.reconciliation_job.check_interval", "the frequency at which to check if the span config reconciliation job exists (and to start it if not)", 10*time.Minute, settings.NonNegativeDuration, @@ -45,14 +45,20 @@ var checkReconciliationJobInterval = settings.RegisterDurationSetting( // // For the host tenant it has no effect unless // COCKROACH_EXPERIMENTAL_SPAN_CONFIGS is also set. +// +// TODO(irfansharif): This should be a tenant read-only setting once the work +// for #73349 is completed. var jobEnabledSetting = settings.RegisterBoolSetting( settings.TenantWritable, - "spanconfig.experimental_reconciliation_job.enabled", + "spanconfig.reconciliation_job.enabled", "enable the use of the kv accessor", false) // Manager is the coordinator of the span config subsystem. It ensures that -// there's only one span config reconciliation job for every tenant. It also +// there's only one span config reconciliation job[1] for every tenant. It also // captures all relevant dependencies for the job. 
+// +// [1]: The reconciliation job is responsible for reconciling a tenant's zone +// configurations with the clusters span configurations. type Manager struct { db *kv.DB jr *jobs.Registry @@ -64,8 +70,6 @@ type Manager struct { spanconfig.Reconciler } -var _ spanconfig.ReconciliationDependencies = &Manager{} - // New constructs a new Manager. func New( db *kv.DB, diff --git a/pkg/spanconfig/spanconfigmanager/manager_test.go b/pkg/spanconfig/spanconfigmanager/manager_test.go index 371fdbbbeca0..9600ebef2bfd 100644 --- a/pkg/spanconfig/spanconfigmanager/manager_test.go +++ b/pkg/spanconfig/spanconfigmanager/manager_test.go @@ -201,7 +201,7 @@ func TestManagerCheckJobConditions(t *testing.T) { ts := tc.Server(0) tdb := sqlutils.MakeSQLRunner(tc.ServerConn(0)) - tdb.Exec(t, `SET CLUSTER SETTING spanconfig.experimental_reconciliation_job.enabled = false;`) + tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = false;`) var interceptCount int32 checkInterceptCountGreaterThan := func(min int32) int32 { @@ -233,9 +233,9 @@ func TestManagerCheckJobConditions(t *testing.T) { require.NoError(t, manager.Start(ctx)) currentCount = checkInterceptCountGreaterThan(currentCount) // wait for an initial check - tdb.Exec(t, `SET CLUSTER SETTING spanconfig.experimental_reconciliation_job.enabled = true;`) + tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = true;`) currentCount = checkInterceptCountGreaterThan(currentCount) // the job enablement setting triggers a check - tdb.Exec(t, `SET CLUSTER SETTING spanconfig.experimental_reconciliation_job.check_interval = '25m'`) + tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.check_interval = '25m'`) _ = checkInterceptCountGreaterThan(currentCount) // the job check interval setting triggers a check } diff --git a/pkg/spanconfig/spanconfigreconciler/BUILD.bazel b/pkg/spanconfig/spanconfigreconciler/BUILD.bazel index 48eb36f5a59e..125692ab1058 100644 --- a/pkg/spanconfig/spanconfigreconciler/BUILD.bazel +++ b/pkg/spanconfig/spanconfigreconciler/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//pkg/sql/catalog/descs", "//pkg/sql/sem/tree", "//pkg/util/hlc", + "//pkg/util/syncutil", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/spanconfig/spanconfigreconciler/reconciler.go b/pkg/spanconfig/spanconfigreconciler/reconciler.go index 943dffdafde0..8e8d19eb6457 100644 --- a/pkg/spanconfig/spanconfigreconciler/reconciler.go +++ b/pkg/spanconfig/spanconfigreconciler/reconciler.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" ) @@ -38,6 +39,11 @@ type Reconciler struct { codec keys.SQLCodec tenID roachpb.TenantID knobs *spanconfig.TestingKnobs + + mu struct { + syncutil.RWMutex + lastCheckpoint hlc.Timestamp + } } var _ spanconfig.Reconciler = &Reconciler{} @@ -122,7 +128,7 @@ func New( // checkpoint. For changes to, say, RANGE DEFAULT, the RPC request proto is // proportional to the number of schema objects. 
func (r *Reconciler) Reconcile( - ctx context.Context, startTS hlc.Timestamp, callback func(checkpoint hlc.Timestamp) error, + ctx context.Context, startTS hlc.Timestamp, onCheckpoint func() error, ) error { // TODO(irfansharif): Check system.{zones,descriptors} for last GC timestamp // and avoid the full reconciliation pass if the startTS provided is @@ -136,15 +142,20 @@ func (r *Reconciler) Reconcile( codec: r.codec, tenID: r.tenID, } - latestStore, reconciledUpto, err := full.reconcile(ctx) + latestStore, reconciledUpUntil, err := full.reconcile(ctx) if err != nil { return err } - if err := callback(reconciledUpto); err != nil { + r.mu.Lock() + r.mu.lastCheckpoint = reconciledUpUntil + r.mu.Unlock() + + if err := onCheckpoint(); err != nil { return err } + incrementalStartTS := reconciledUpUntil incremental := incrementalReconciler{ sqlTranslator: r.sqlTranslator, sqlWatcher: r.sqlWatcher, @@ -154,7 +165,21 @@ func (r *Reconciler) Reconcile( codec: r.codec, knobs: r.knobs, } - return incremental.reconcile(ctx, reconciledUpto, callback) + return incremental.reconcile(ctx, incrementalStartTS, func(reconciledUpUntil hlc.Timestamp) error { + r.mu.Lock() + r.mu.lastCheckpoint = reconciledUpUntil + r.mu.Unlock() + + return onCheckpoint() + }) +} + +// Checkpoint is part of the spanconfig.Reconciler interface. +func (r *Reconciler) Checkpoint() hlc.Timestamp { + r.mu.RLock() + defer r.mu.RUnlock() + + return r.mu.lastCheckpoint } // fullReconciler is a single-use orchestrator for the full reconciliation diff --git a/pkg/spanconfig/spanconfigsqlwatcher/zonesdecoder.go b/pkg/spanconfig/spanconfigsqlwatcher/zonesdecoder.go index 7d27e77b91d3..ecc05871bb0a 100644 --- a/pkg/spanconfig/spanconfigsqlwatcher/zonesdecoder.go +++ b/pkg/spanconfig/spanconfigsqlwatcher/zonesdecoder.go @@ -24,7 +24,7 @@ import ( // zonesDecoder decodes the zone ID (primary key) of rows from system.zones. // It's not safe for concurrent use. type zonesDecoder struct { - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc codec keys.SQLCodec } diff --git a/pkg/spanconfig/spanconfigstore/store.go b/pkg/spanconfig/spanconfigstore/store.go index 1aa417b7d9b6..fea96b40fa4d 100644 --- a/pkg/spanconfig/spanconfigstore/store.go +++ b/pkg/spanconfig/spanconfigstore/store.go @@ -32,7 +32,7 @@ import ( // is set. var EnabledSetting = settings.RegisterBoolSetting( settings.SystemOnly, - "spanconfig.experimental_store.enabled", + "spanconfig.store.enabled", `use the span config infrastructure in KV instead of the system config span`, false, ) diff --git a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go index aed8435355b7..bca31c7371f4 100644 --- a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go +++ b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/cluster.go @@ -32,17 +32,21 @@ import ( // cluster while providing convenient, scoped access to each tenant's specific // span config primitives. It's not safe for concurrent use. type Handle struct { - t *testing.T - tc *testcluster.TestCluster - ts map[roachpb.TenantID]*Tenant + t *testing.T + tc *testcluster.TestCluster + ts map[roachpb.TenantID]*Tenant + scKnobs *spanconfig.TestingKnobs } // NewHandle returns a new Handle. 
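The callback no longer carries the checkpoint timestamp; the reconciler records it under its mutex and exposes it through `Checkpoint()`, which the callback can read whenever it fires. A self-contained sketch of the pattern, using `time.Time` in place of `hlc.Timestamp`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type reconciler struct {
	mu struct {
		sync.RWMutex
		lastCheckpoint time.Time // stands in for hlc.Timestamp
	}
}

// Checkpoint returns the timestamp recorded by the last round of progress.
func (r *reconciler) Checkpoint() time.Time {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.mu.lastCheckpoint
}

// reconcile records a checkpoint and then invokes the parameterless callback.
func (r *reconciler) reconcile(onCheckpoint func() error) error {
	// Pretend we made incremental progress up to "now".
	r.mu.Lock()
	r.mu.lastCheckpoint = time.Now()
	r.mu.Unlock()
	return onCheckpoint()
}

func main() {
	r := &reconciler{}
	_ = r.reconcile(func() error {
		// The job would stash this somewhere durable.
		fmt.Println("checkpointed at", r.Checkpoint())
		return nil
	})
}
```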
-func NewHandle(t *testing.T, tc *testcluster.TestCluster) *Handle { +func NewHandle( + t *testing.T, tc *testcluster.TestCluster, scKnobs *spanconfig.TestingKnobs, +) *Handle { return &Handle{ - t: t, - tc: tc, - ts: make(map[roachpb.TenantID]*Tenant), + t: t, + tc: tc, + ts: make(map[roachpb.TenantID]*Tenant), + scKnobs: scKnobs, } } @@ -56,7 +60,12 @@ func (h *Handle) InitializeTenant(ctx context.Context, tenID roachpb.TenantID) * tenantState.db = sqlutils.MakeSQLRunner(h.tc.ServerConn(0)) tenantState.cleanup = func() {} // noop } else { - tenantArgs := base.TestTenantArgs{TenantID: tenID} + tenantArgs := base.TestTenantArgs{ + TenantID: tenID, + TestingKnobs: base.TestingKnobs{ + SpanConfig: h.scKnobs, + }, + } var err error tenantState.TestTenantInterface, err = testServer.StartTenant(ctx, tenantArgs) require.NoError(h.t, err) diff --git a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go index 1e84f18a5e81..dc7f7b933c16 100644 --- a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go +++ b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go @@ -66,9 +66,11 @@ func (s *Tenant) TimestampAfterLastExec() hlc.Timestamp { return s.mu.tsAfterLastExec } -// Checkpoint is used to record a checkpointed timestamp, retrievable via -// LastCheckpoint. -func (s *Tenant) Checkpoint(ts hlc.Timestamp) { +// RecordCheckpoint is used to record the reconciliation checkpoint, retrievable +// via LastCheckpoint. +func (s *Tenant) RecordCheckpoint() { + ts := s.Reconciler().Checkpoint() + s.mu.Lock() defer s.mu.Unlock() s.mu.lastCheckpoint = ts @@ -140,6 +142,7 @@ func (s *Tenant) LookupTableByName( CommonLookupFlags: tree.CommonLookupFlags{ Required: true, IncludeOffline: true, + AvoidLeased: true, }, }, ) @@ -164,6 +167,7 @@ func (s *Tenant) LookupDatabaseByName( tree.DatabaseLookupFlags{ Required: true, IncludeOffline: true, + AvoidLeased: true, }, ) if err != nil { diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 0cc023ef4494..b0b1a30722ac 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -72,7 +72,6 @@ go_library( "distsql_plan_bulk.go", "distsql_plan_ctas.go", "distsql_plan_join.go", - "distsql_plan_scrub_physical.go", "distsql_plan_set_op.go", "distsql_plan_stats.go", "distsql_plan_window.go", @@ -177,7 +176,6 @@ go_library( "scrub_constraint.go", "scrub_fk.go", "scrub_index.go", - "scrub_physical.go", "select_name_resolution.go", "sequence.go", "sequence_select.go", @@ -290,6 +288,7 @@ go_library( "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/catconstants", "//pkg/sql/catalog/catformat", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/dbdesc", @@ -306,6 +305,7 @@ go_library( "//pkg/sql/catalog/typedesc", "//pkg/sql/colexec", "//pkg/sql/colflow", + "//pkg/sql/commenter", "//pkg/sql/contention", "//pkg/sql/covering", "//pkg/sql/delegate", @@ -354,7 +354,6 @@ go_library( "//pkg/sql/schemachanger/scdeps", "//pkg/sql/schemachanger/scerrors", "//pkg/sql/schemachanger/scexec", - "//pkg/sql/schemachanger/scgraphviz", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", "//pkg/sql/schemachanger/scplan", @@ -420,7 +419,7 @@ go_library( "//pkg/util/tracing/tracingpb", "//pkg/util/uint128", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", 
"@com_github_cockroachdb_errors//hintdetail", "@com_github_cockroachdb_logtags//:logtags", @@ -446,6 +445,7 @@ go_test( "ambiguous_commit_test.go", "as_of_test.go", "backfill_num_ranges_in_span_test.go", + "backfill_test.go", "builtin_mem_usage_test.go", "builtin_test.go", "comment_on_column_test.go", @@ -582,6 +582,7 @@ go_test( "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/lease", @@ -610,6 +611,7 @@ go_test( "//pkg/sql/row", "//pkg/sql/rowenc", "//pkg/sql/rowenc/rowencpb", + "//pkg/sql/rowenc/valueside", "//pkg/sql/rowexec", "//pkg/sql/rowinfra", "//pkg/sql/scrub", @@ -667,7 +669,7 @@ go_test( "//pkg/util/tracing", "//pkg/util/tracing/tracingpb", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_cockroach_go_v2//crdb", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index 2681b0159816..7b6635d45212 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" @@ -206,7 +207,7 @@ func (n *alterDatabaseAddRegionNode) startExec(params runParams) error { if err := params.p.checkRegionIsCurrentlyActive( params.ctx, - descpb.RegionName(n.n.Region), + catpb.RegionName(n.n.Region), ); err != nil { return err } @@ -351,7 +352,7 @@ func (p *planner) AlterDatabaseDropRegion( removingPrimaryRegion := false var toDrop []*typedesc.Mutable - if dbDesc.RegionConfig.PrimaryRegion == descpb.RegionName(n.Region) { + if dbDesc.RegionConfig.PrimaryRegion == catpb.RegionName(n.Region) { removingPrimaryRegion = true typeID, err := dbDesc.MultiRegionEnumID() @@ -519,7 +520,7 @@ func removeLocalityConfigFromAllTablesInDB( } switch t := tbDesc.LocalityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_Global_: if err := ApplyZoneConfigForMultiRegionTable( ctx, p.txn, @@ -530,7 +531,7 @@ func removeLocalityConfigFromAllTablesInDB( ); err != nil { return err } - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByTable_: if t.RegionalByTable.Region != nil { // This should error during the type descriptor changes. return errors.AssertionFailedf( @@ -538,7 +539,7 @@ func removeLocalityConfigFromAllTablesInDB( tbDesc.Name, ) } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: // This should error during the type descriptor changes. 
return errors.AssertionFailedf( "unexpected REGIONAL BY ROW on table %s during DROP REGION", @@ -689,7 +690,7 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e } found := false for _, r := range prevRegionConfig.Regions() { - if r == descpb.RegionName(n.n.PrimaryRegion) { + if r == catpb.RegionName(n.n.PrimaryRegion) { found = true break } @@ -719,7 +720,7 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e // To update the primary region we need to modify the database descriptor, // update the multi-region enum, and write a new zone configuration. - n.desc.RegionConfig.PrimaryRegion = descpb.RegionName(n.n.PrimaryRegion) + n.desc.RegionConfig.PrimaryRegion = catpb.RegionName(n.n.PrimaryRegion) if err := params.p.writeNonDropDatabaseChange( params.ctx, n.desc, @@ -729,7 +730,7 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e } // Update the primary region in the type descriptor, and write it back out. - typeDesc.RegionConfig.PrimaryRegion = descpb.RegionName(n.n.PrimaryRegion) + typeDesc.RegionConfig.PrimaryRegion = catpb.RegionName(n.n.PrimaryRegion) if err := params.p.writeTypeDesc(params.ctx, typeDesc); err != nil { return err } diff --git a/pkg/sql/alter_primary_key.go b/pkg/sql/alter_primary_key.go index ca19413d48e2..0d79cedc328a 100644 --- a/pkg/sql/alter_primary_key.go +++ b/pkg/sql/alter_primary_key.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -171,7 +172,7 @@ func (p *planner) AlterPrimaryKey( // If the new index is requested to be sharded, set up the index descriptor // to be sharded, and add the new shard column if it is missing. if alterPKNode.Sharded != nil { - shardCol, newColumns, newColumn, err := setupShardedIndex( + shardCol, newColumns, err := setupShardedIndex( ctx, p.EvalContext(), &p.semaCtx, @@ -186,15 +187,13 @@ func (p *planner) AlterPrimaryKey( return err } alterPKNode.Columns = newColumns - if newColumn { - if err := p.setupConstraintForShard( - ctx, - tableDesc, - shardCol, - newPrimaryIndexDesc.Sharded.ShardBuckets, - ); err != nil { - return err - } + if err := p.maybeSetupConstraintForShard( + ctx, + tableDesc, + shardCol, + newPrimaryIndexDesc.Sharded.ShardBuckets, + ); err != nil { + return err } telemetry.Inc(sqltelemetry.HashShardedIndexCounter) } @@ -242,15 +241,15 @@ func (p *planner) AlterPrimaryKey( if alterPrimaryKeyLocalitySwap != nil { localityConfigSwap := alterPrimaryKeyLocalitySwap.localityConfigSwap switch to := localityConfigSwap.NewLocalityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: // Check we are migrating from a known locality. switch localityConfigSwap.OldLocalityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: // We want to drop the old PARTITION ALL BY clause in this case for all // the indexes if we were from a REGIONAL BY ROW. 
dropPartitionAllBy = true - case *descpb.TableDescriptor_LocalityConfig_Global_, - *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_Global_, + *catpb.LocalityConfig_RegionalByTable_: default: return errors.AssertionFailedf( "unknown locality config swap: %T to %T", @@ -281,8 +280,8 @@ func (p *planner) AlterPrimaryKey( *alterPrimaryKeyLocalitySwap.newColumnName, ) } - case *descpb.TableDescriptor_LocalityConfig_Global_, - *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_Global_, + *catpb.LocalityConfig_RegionalByTable_: // We should only migrating from a REGIONAL BY ROW. if localityConfigSwap.OldLocalityConfig.GetRegionalByRow() == nil { return errors.AssertionFailedf( diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 196ec49e92a9..02034c8d1acc 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -1126,8 +1127,8 @@ func applyColumnMutation( for _, fk := range tableDesc.OutboundFKs { for _, colID := range fk.OriginColumnIDs { if colID == col.GetID() && - fk.OnUpdate != descpb.ForeignKeyReference_NO_ACTION && - fk.OnUpdate != descpb.ForeignKeyReference_RESTRICT { + fk.OnUpdate != catpb.ForeignKeyAction_NO_ACTION && + fk.OnUpdate != catpb.ForeignKeyAction_RESTRICT { return pgerror.Newf( pgcode.InvalidColumnDefinition, "column %s(%d) cannot have both an ON UPDATE expression and a foreign"+ diff --git a/pkg/sql/alter_table_locality.go b/pkg/sql/alter_table_locality.go index 782e3f3a03b5..014bf2794886 100644 --- a/pkg/sql/alter_table_locality.go +++ b/pkg/sql/alter_table_locality.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -80,9 +81,11 @@ func (p *planner) AlterTableLocality( } if !dbDesc.IsMultiRegion() { - return nil, pgerror.Newf( + return nil, errors.WithHint(pgerror.Newf( pgcode.InvalidTableDefinition, "cannot alter a table's LOCALITY if its database is not multi-region enabled", + ), + "database must first be multi-region enabled using ALTER DATABASE ... SET PRIMARY REGION ", ) } @@ -385,7 +388,7 @@ func (n *alterTableSetLocalityNode) alterTableLocalityToRegionalByRow( // SET LOCALITY where the before OR after state is REGIONAL BY ROW. func (n *alterTableSetLocalityNode) alterTableLocalityFromOrToRegionalByRow( params runParams, - newLocalityConfig descpb.TableDescriptor_LocalityConfig, + newLocalityConfig catpb.LocalityConfig, mutationIdxAllowedInSameTxn *int, newColumnName *tree.Name, newColumnID *descpb.ColumnID, @@ -473,7 +476,7 @@ func (n *alterTableSetLocalityNode) startExec(params runParams) error { // Look at the existing locality, and implement any changes required to move to // the new locality. 
switch existingLocality.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_Global_: switch newLocality.LocalityLevel { case tree.LocalityLevelGlobal: if err := n.alterTableLocalityToGlobal(params); err != nil { @@ -493,7 +496,7 @@ func (n *alterTableSetLocalityNode) startExec(params runParams) error { default: return errors.AssertionFailedf("unknown table locality: %v", newLocality) } - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByTable_: switch newLocality.LocalityLevel { case tree.LocalityLevelGlobal: if err := n.alterTableLocalityToGlobal(params); err != nil { @@ -513,7 +516,7 @@ func (n *alterTableSetLocalityNode) startExec(params runParams) error { default: return errors.AssertionFailedf("unknown table locality: %v", newLocality) } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: explicitColStart := n.tableDesc.PrimaryIndex.Partitioning.NumImplicitColumns switch newLocality.LocalityLevel { case tree.LocalityLevelGlobal: @@ -646,7 +649,7 @@ func setNewLocalityConfig( desc *tabledesc.Mutable, txn *kv.Txn, b *kv.Batch, - config descpb.TableDescriptor_LocalityConfig, + config catpb.LocalityConfig, kvTrace bool, descsCol *descs.Collection, ) error { diff --git a/pkg/sql/as_of_test.go b/pkg/sql/as_of_test.go index e87c95cd3742..fc6586fd8d8d 100644 --- a/pkg/sql/as_of_test.go +++ b/pkg/sql/as_of_test.go @@ -16,7 +16,7 @@ import ( "fmt" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv" diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go index 44ec7aa496c0..715f9b0b8640 100644 --- a/pkg/sql/authorization.go +++ b/pkg/sql/authorization.go @@ -766,3 +766,27 @@ func (p *planner) HasOwnershipOnSchema( return hasOwnership, nil } + +func (p *planner) HasViewActivityOrViewActivityRedactedRole(ctx context.Context) (bool, error) { + hasAdmin, err := p.HasAdminRole(ctx) + if err != nil { + return hasAdmin, err + } + if !hasAdmin { + hasViewActivity, err := p.HasRoleOption(ctx, roleoption.VIEWACTIVITY) + if err != nil { + return hasViewActivity, err + } + if !hasViewActivity { + hasViewActivityRedacted, err := p.HasRoleOption(ctx, roleoption.VIEWACTIVITYREDACTED) + if err != nil { + return hasViewActivityRedacted, err + } + if !hasViewActivityRedacted { + return false, nil + } + } + } + + return true, nil +} diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 5e1143409fec..4aac7b7b1893 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -38,7 +38,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -251,19 +250,18 @@ func (sc *SchemaChanger) runBackfill(ctx context.Context) error { addedIndexSpans = append(addedIndexSpans, tableDesc.IndexSpan(sc.execCfg.Codec, idx.GetID())) addedIndexes = append(addedIndexes, idx.GetID()) } else if c := m.AsConstraint(); c != nil { - isValidating := false - if c.IsCheck() { - isValidating = c.Check().Validity == descpb.ConstraintValidity_Validating - } else if c.IsForeignKey() { - 
isValidating = c.ForeignKey().Validity == descpb.ConstraintValidity_Validating - } else if c.IsUniqueWithoutIndex() { - isValidating = c.UniqueWithoutIndex().Validity == descpb.ConstraintValidity_Validating - } else if c.IsNotNull() { - // NOT NULL constraints are always validated before they can be added - isValidating = true + isValidating := c.IsCheck() && c.Check().Validity == descpb.ConstraintValidity_Validating || + c.IsForeignKey() && c.ForeignKey().Validity == descpb.ConstraintValidity_Validating || + c.IsUniqueWithoutIndex() && c.UniqueWithoutIndex().Validity == descpb.ConstraintValidity_Validating || + c.IsNotNull() + isSkippingValidation, err := shouldSkipConstraintValidation(tableDesc, c) + if err != nil { + return err } if isValidating { constraintsToAddBeforeValidation = append(constraintsToAddBeforeValidation, c) + } + if isValidating && !isSkippingValidation { constraintsToValidate = append(constraintsToValidate, c) } } else if mvRefresh := m.AsMaterializedViewRefresh(); mvRefresh != nil { @@ -362,6 +360,35 @@ func (sc *SchemaChanger) runBackfill(ctx context.Context) error { return nil } +// shouldSkipConstraintValidation checks if a validating constraint should skip +// validation and be added directly. A Check Constraint can skip validation if it's +// created for a shard column internally. +func shouldSkipConstraintValidation( + tableDesc *tabledesc.Mutable, c catalog.ConstraintToUpdate, +) (bool, error) { + if !c.IsCheck() { + return false, nil + } + + check := c.Check() + // The check constraint on shard column is always on the shard column itself. + if len(check.ColumnIDs) != 1 { + return false, nil + } + + checkCol, err := tableDesc.FindColumnWithID(check.ColumnIDs[0]) + if err != nil { + return false, err + } + + // We only want to skip validation when the shard column is first added and + // the constraint is created internally since the shard column computation is + // well defined. Note that we show the shard column in `SHOW CREATE TABLE`, + // and we don't prevent users from adding other constraints on it. For those + // constraints, we still want to validate. + return tableDesc.IsShardColumn(checkCol) && checkCol.Adding(), nil +} + // dropConstraints publishes a new version of the given table descriptor with // the given constraint removed from it, and waits until the entire cluster is // on the new version of the table descriptor. It returns the new table descs. 
@@ -647,7 +674,7 @@ func (sc *SchemaChanger) validateConstraints( } if fn := sc.testingKnobs.RunBeforeConstraintValidation; fn != nil { - if err := fn(); err != nil { + if err := fn(constraints); err != nil { return err } } @@ -766,7 +793,7 @@ func (sc *SchemaChanger) truncateIndexes( ) error { log.Infof(ctx, "clearing data for %d indexes", len(dropped)) - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} droppedIndexIDs := make([]uint32, len(dropped)) for i, idx := range dropped { droppedIndexIDs[i] = uint32(idx.GetID()) @@ -2411,7 +2438,7 @@ func indexTruncateInTxn( idx catalog.Index, traceKV bool, ) error { - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} var sp roachpb.Span for done := false; !done; done = sp.Key == nil { internal := evalCtx.SessionData().Internal diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index 3d436d0f7fbd..ff5c9c699c8e 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -78,7 +78,7 @@ type ColumnBackfiller struct { evalCtx *tree.EvalContext fetcher row.Fetcher - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // mon is a memory monitor linked with the ColumnBackfiller on creation. mon *mon.BytesMonitor @@ -156,8 +156,7 @@ func (cb *ColumnBackfiller) init( false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ &cb.alloc, cb.mon, tableArgs, @@ -214,7 +213,7 @@ func (cb *ColumnBackfiller) InitForDistributedUse( // Install type metadata in the target descriptors, as well as resolve any // user defined types in the column expressions. if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - resolver := flowCtx.TypeResolverFactory.NewTypeResolver(txn) + resolver := flowCtx.NewTypeResolver(txn) // Hydrate all the types present in the table. if err := typedesc.HydrateTypesInTableDescriptor(ctx, desc.TableDesc(), &resolver); err != nil { return err @@ -248,7 +247,7 @@ func (cb *ColumnBackfiller) InitForDistributedUse( // Release leases on any accessed types now that type metadata is installed. // We do this so that leases on any accessed types are not held for the // entire backfill process. - flowCtx.TypeResolverFactory.Descriptors.ReleaseAll(ctx) + flowCtx.Descriptors.ReleaseAll(ctx) rowMetrics := flowCtx.GetRowMetrics() return cb.init(evalCtx, defaultExprs, computedExprs, desc, mon, rowMetrics) @@ -450,7 +449,7 @@ type IndexBackfiller struct { valNeededForCol util.FastIntSet - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // mon is a memory monitor linked with the IndexBackfiller on creation. mon *mon.BytesMonitor @@ -627,7 +626,7 @@ func (ib *IndexBackfiller) InitForDistributedUse( // Install type metadata in the target descriptors, as well as resolve any // user defined types in partial index predicate expressions. if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - resolver := flowCtx.TypeResolverFactory.NewTypeResolver(txn) + resolver := flowCtx.NewTypeResolver(txn) // Hydrate all the types present in the table. if err = typedesc.HydrateTypesInTableDescriptor( ctx, desc.TableDesc(), &resolver, @@ -648,7 +647,7 @@ func (ib *IndexBackfiller) InitForDistributedUse( // Release leases on any accessed types now that type metadata is installed. // We do this so that leases on any accessed types are not held for the // entire backfill process. 
- flowCtx.TypeResolverFactory.Descriptors.ReleaseAll(ctx) + flowCtx.Descriptors.ReleaseAll(ctx) // Add the columns referenced in the predicate to valNeededForCol so that // columns necessary to evaluate the predicate expression are fetched. @@ -839,8 +838,7 @@ func (ib *IndexBackfiller) BuildIndexEntriesChunk( false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ &ib.alloc, ib.mon, tableArgs, diff --git a/pkg/sql/backfill_test.go b/pkg/sql/backfill_test.go new file mode 100644 index 000000000000..b5e9ad9f6bcc --- /dev/null +++ b/pkg/sql/backfill_test.go @@ -0,0 +1,184 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sql + +import ( + "testing" + + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/stretchr/testify/require" +) + +// constraintToUpdateForTest implements the catalog.ConstraintToUpdate interface. +// It's only used for testing +type constraintToUpdateForTest struct { + catalog.ConstraintToUpdate + desc *descpb.ConstraintToUpdate +} + +// IsCheck returns true iff this is an update for a check constraint. +func (c constraintToUpdateForTest) IsCheck() bool { + return c.desc.ConstraintType == descpb.ConstraintToUpdate_CHECK +} + +// Check returns the underlying check constraint, if there is one. 
+func (c constraintToUpdateForTest) Check() descpb.TableDescriptor_CheckConstraint { + return c.desc.Check +} + +func TestShouldSkipConstraintValidation(t *testing.T) { + defer leaktest.AfterTest(t)() + + tableDesc := &tabledesc.Mutable{} + tableDesc.TableDescriptor = descpb.TableDescriptor{ + ID: 2, + ParentID: 1, + Name: "foo", + FormatVersion: descpb.InterleavedFormatVersion, + Columns: []descpb.ColumnDescriptor{ + {ID: 1, Name: "c1"}, + }, + Families: []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary", ColumnIDs: []descpb.ColumnID{1, 2}, ColumnNames: []string{"c1", "c2"}}, + }, + PrimaryIndex: descpb.IndexDescriptor{ + ID: 1, Name: "pri", KeyColumnIDs: []descpb.ColumnID{1}, + KeyColumnNames: []string{"c1"}, + KeyColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC}, + EncodingType: descpb.PrimaryIndexEncoding, + Version: descpb.LatestPrimaryIndexDescriptorVersion, + }, + Mutations: []descpb.DescriptorMutation{ + { + Descriptor_: &descpb.DescriptorMutation_Index{ + Index: &descpb.IndexDescriptor{ + ID: 2, Name: "new_hash_index", KeyColumnIDs: []descpb.ColumnID{2, 3}, + KeyColumnNames: []string{"c2", "c3"}, + KeyColumnDirections: []descpb.IndexDescriptor_Direction{ + descpb.IndexDescriptor_ASC, + descpb.IndexDescriptor_ASC, + }, + EncodingType: descpb.PrimaryIndexEncoding, + Version: descpb.LatestPrimaryIndexDescriptorVersion, + Sharded: catpb.ShardedDescriptor{ + IsSharded: true, + Name: "c3", + ShardBuckets: 8, + ColumnNames: []string{"c2"}, + }, + }, + }, + Direction: descpb.DescriptorMutation_ADD, + }, + { + Descriptor_: &descpb.DescriptorMutation_Column{ + Column: &descpb.ColumnDescriptor{ + ID: 2, + Name: "c2", + Virtual: true, + }, + }, + Direction: descpb.DescriptorMutation_ADD, + }, + { + Descriptor_: &descpb.DescriptorMutation_Column{ + Column: &descpb.ColumnDescriptor{ + ID: 3, + Name: "c3", + Virtual: true, + }, + }, + Direction: descpb.DescriptorMutation_ADD, + }, + }, + } + + type testCase struct { + name string + constraint constraintToUpdateForTest + expectedResult bool + } + + tcs := []testCase{ + { + name: "test_adding_shard_col_check_constraint", + constraint: constraintToUpdateForTest{ + desc: &descpb.ConstraintToUpdate{ + ConstraintType: descpb.ConstraintToUpdate_CHECK, + Check: descpb.TableDescriptor_CheckConstraint{ + Expr: "some fake expr", + Name: "some fake name", + Validity: descpb.ConstraintValidity_Validating, + ColumnIDs: []descpb.ColumnID{3}, + Hidden: true, + }, + }, + }, + expectedResult: true, + }, + { + name: "test_adding_non_shard_col_check_constraint", + constraint: constraintToUpdateForTest{ + desc: &descpb.ConstraintToUpdate{ + ConstraintType: descpb.ConstraintToUpdate_CHECK, + Check: descpb.TableDescriptor_CheckConstraint{ + Expr: "some fake expr", + Name: "some fake name", + Validity: descpb.ConstraintValidity_Validating, + ColumnIDs: []descpb.ColumnID{2}, + Hidden: false, + }, + }, + }, + expectedResult: false, + }, + { + name: "test_adding_multi_col_check_constraint", + constraint: constraintToUpdateForTest{ + desc: &descpb.ConstraintToUpdate{ + ConstraintType: descpb.ConstraintToUpdate_CHECK, + Check: descpb.TableDescriptor_CheckConstraint{ + Expr: "some fake expr", + Name: "some fake name", + Validity: descpb.ConstraintValidity_Validating, + ColumnIDs: []descpb.ColumnID{2, 3}, + Hidden: false, + }, + }, + }, + expectedResult: false, + }, + { + name: "test_adding_non_check_constraint", + constraint: constraintToUpdateForTest{ + desc: &descpb.ConstraintToUpdate{ + ConstraintType: 
descpb.ConstraintToUpdate_FOREIGN_KEY, + }, + }, + expectedResult: false, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + isSkipping, err := shouldSkipConstraintValidation(tableDesc, tc.constraint) + if err != nil { + t.Fatal("Failed to run function being tested:", err) + } + require.Equal(t, tc.expectedResult, isSkipping) + }) + } + +} diff --git a/pkg/sql/buffer_util.go b/pkg/sql/buffer_util.go index 67d83ff95b15..597add446372 100644 --- a/pkg/sql/buffer_util.go +++ b/pkg/sql/buffer_util.go @@ -136,7 +136,7 @@ type rowContainerIterator struct { typs []*types.T datums tree.Datums - da rowenc.DatumAlloc + da tree.DatumAlloc } // newRowContainerIterator returns a new rowContainerIterator that must be diff --git a/pkg/sql/catalog/BUILD.bazel b/pkg/sql/catalog/BUILD.bazel index 8f7101b11ac6..9612694eff9d 100644 --- a/pkg/sql/catalog/BUILD.bazel +++ b/pkg/sql/catalog/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//pkg/kv", "//pkg/roachpb:with-mocks", "//pkg/server/telemetry", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", @@ -47,6 +48,7 @@ go_test( "table_col_set_test.go", ], embed = [":catalog"], + tags = ["no-remote"], deps = [ "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/dbdesc", diff --git a/pkg/sql/catalog/catformat/BUILD.bazel b/pkg/sql/catalog/catformat/BUILD.bazel index 354449a08101..191390ca0e78 100644 --- a/pkg/sql/catalog/catformat/BUILD.bazel +++ b/pkg/sql/catalog/catformat/BUILD.bazel @@ -22,6 +22,7 @@ go_test( srcs = ["index_test.go"], embed = [":catformat"], deps = [ + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/sem/tree", diff --git a/pkg/sql/catalog/catformat/index_test.go b/pkg/sql/catalog/catformat/index_test.go index 3ce6adea0f5e..ca866d7ee024 100644 --- a/pkg/sql/catalog/catformat/index_test.go +++ b/pkg/sql/catalog/catformat/index_test.go @@ -15,6 +15,7 @@ import ( "strconv" "testing" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -106,7 +107,7 @@ func TestIndexForDisplay(t *testing.T) { shardedIndex := baseIndex shardedIndex.KeyColumnNames = []string{"bucket_col", "a"} shardedIndex.KeyColumnIDs = descpb.ColumnIDs{0, 1} - shardedIndex.Sharded = descpb.ShardedDescriptor{ + shardedIndex.Sharded = catpb.ShardedDescriptor{ IsSharded: true, ShardBuckets: 8, ColumnNames: []string{"a"}, diff --git a/pkg/sql/catalog/catpb/BUILD.bazel b/pkg/sql/catalog/catpb/BUILD.bazel new file mode 100644 index 000000000000..dd9910b71a21 --- /dev/null +++ b/pkg/sql/catalog/catpb/BUILD.bazel @@ -0,0 +1,37 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "catpb_proto", + srcs = ["catalog.proto"], + strip_import_prefix = "/pkg", + visibility = ["//visibility:public"], + deps = ["@com_github_gogo_protobuf//gogoproto:gogo_proto"], +) + +go_proto_library( + name = "catpb_go_proto", + compilers = ["//pkg/cmd/protoc-gen-gogoroach:protoc-gen-gogoroach_compiler"], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb", + proto = ":catpb_proto", + visibility = ["//visibility:public"], + deps = ["@com_github_gogo_protobuf//gogoproto"], +) + +go_library( + name = "catpb", + srcs = [ + "constraint.go", + "doc.go", 
+ "multiregion.go", + ], + embed = [":catpb_go_proto"], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb", + visibility = ["//visibility:public"], + deps = [ + "//pkg/sql/sem/tree", + "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", + ], +) diff --git a/pkg/sql/catalog/catpb/catalog.proto b/pkg/sql/catalog/catpb/catalog.proto new file mode 100644 index 000000000000..121a02e36649 --- /dev/null +++ b/pkg/sql/catalog/catpb/catalog.proto @@ -0,0 +1,125 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Cannot be proto3 because we are moving definitions from descpb which +// relies on proto2 for its nullable primitives and for its different +// enum interactions with json marshaling. +syntax = "proto2"; + +package cockroach.sql.catalog.catpb; +option go_package = "catpb"; + +import "gogoproto/gogo.proto"; + +// ForeignKeyAction describes the action which should be taken when a foreign +// key constraint reference is acted upon. +enum ForeignKeyAction { + option (gogoproto.goproto_enum_stringer) = false; + NO_ACTION = 0; + RESTRICT = 1; + SET_NULL = 2; + SET_DEFAULT = 3; + CASCADE = 4; +} + +// LocalityConfig is used to figure the locality of a table. +message LocalityConfig { + option (gogoproto.equal) = true; + // REGIONAL BY TABLE tables have an "implicit" bidirectional dependency with + // the multi-region enum. The dependency is described "implicit" because + // even though no column on the table uses the multi-region type descriptor + // to store the homing region, a value from the type descriptor is stored in + // the locality config below (when the table is homed in the non-primary + // region). + // This changes how type dependencies are constructed for table descriptors. + // After the introduction of REGIONAL BY TABLE tables, a column on the table + // descriptor using a type is no longer a necessary (note it is still a + // sufficient) condition to establish a type dependency. As is the case with + // adding and dropping columns, this type dependency must be negotiated. As + // such, switching locality patterns or adding new locality configs must be + // done so that back references to the multi-region type descriptor are + // kept sane. + message RegionalByTable { + option (gogoproto.equal) = true; + // Region is set if the table has an affinity with a non-primary region. + optional string region = 1 [(gogoproto.casttype)="RegionName"]; + } + message RegionalByRow { + option (gogoproto.equal) = true; + // As is set if the table has a REGIONAL BY ROW AS ... set to a specific column. + optional string as = 1; + } + message Global { + option (gogoproto.equal) = true; + } + oneof locality { + Global global = 1; + RegionalByTable regional_by_table = 2; + RegionalByRow regional_by_row = 3; + } +} + +// SystemColumnKind is an enum representing the different kind of system +// columns that can be synthesized by the execution engine. +enum SystemColumnKind { + // Default value, unused. + NONE = 0; + // A system column containing the value of the MVCC timestamp associated + // with the kv's corresponding to the row. 
+ MVCCTIMESTAMP = 1; + // A system column containing the OID of the table that the row came from. + TABLEOID = 2; +} + +// GeneratedAsIdentityType is an enum representing how the creation of +// a column is associated with the GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY +// syntax. +enum GeneratedAsIdentityType { + // A column created without `GENERATED ... AS IDENTITY` syntax. + NOT_IDENTITY_COLUMN = 0; + // A column created with `GENERATED ALWAYS AS IDENTITY` syntax. + // Such a column does not allow override without `OVERRIDING SYSTEM VALUE` + // syntax. + GENERATED_ALWAYS = 1; + // A column created with `GENERATED BY DEFAULT AS IDENTITY` syntax. + // Such a column can be overridden without `OVERRIDING SYSTEM VALUE` syntax. + GENERATED_BY_DEFAULT = 2; +} + +// ShardedDescriptor represents an index (either primary or secondary) that is hash +// sharded into a user-specified number of buckets. +// +// As as example, sample field values for the following table: +// +// CREATE TABLE abc ( +// a INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=10, // column id: 1 +// b BYTES +// ); +// +// Sharded descriptor: +// name: "a_shard" +// shard_buckets: 10 +// column_names: ["a"] +message ShardedDescriptor { + option (gogoproto.equal) = true; + + // IsSharded indicates whether the index in question is a sharded one. + optional bool is_sharded = 1 [(gogoproto.nullable) = false]; + // Name is the name of the shard column. + optional string name = 2 [(gogoproto.nullable) = false]; + + // ShardBuckets indicates the number of shards this index is divided into. + optional int32 shard_buckets = 3 [(gogoproto.nullable) = false, + (gogoproto.customname) = "ShardBuckets"]; + + // ColumnNames lists the names of the columns used to compute the shard column's + // values. + repeated string column_names = 4; +} diff --git a/pkg/sql/catalog/catpb/constraint.go b/pkg/sql/catalog/catpb/constraint.go new file mode 100644 index 000000000000..3f951d5bf80a --- /dev/null +++ b/pkg/sql/catalog/catpb/constraint.go @@ -0,0 +1,38 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package catpb + +import ( + "strconv" + + "github.com/cockroachdb/redact" +) + +// String implements the fmt.Stringer interface. +func (x ForeignKeyAction) String() string { + switch x { + case ForeignKeyAction_RESTRICT: + return "RESTRICT" + case ForeignKeyAction_SET_DEFAULT: + return "SET DEFAULT" + case ForeignKeyAction_SET_NULL: + return "SET NULL" + case ForeignKeyAction_CASCADE: + return "CASCADE" + default: + return strconv.Itoa(int(x)) + } +} + +var _ redact.SafeValue = ForeignKeyAction(0) + +// SafeValue implements redact.SafeValue. +func (x ForeignKeyAction) SafeValue() {} diff --git a/pkg/sql/catalog/catpb/doc.go b/pkg/sql/catalog/catpb/doc.go new file mode 100644 index 000000000000..940cc3108437 --- /dev/null +++ b/pkg/sql/catalog/catpb/doc.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package catpb contains definitions of low-level serializations of catalog +// concepts which can be shared by descriptors and schema change elements. +package catpb diff --git a/pkg/sql/catalog/descpb/multiregion.go b/pkg/sql/catalog/catpb/multiregion.go similarity index 83% rename from pkg/sql/catalog/descpb/multiregion.go rename to pkg/sql/catalog/catpb/multiregion.go index 99416143f85b..7e0b48d11205 100644 --- a/pkg/sql/catalog/descpb/multiregion.go +++ b/pkg/sql/catalog/catpb/multiregion.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package descpb +package catpb import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -18,6 +18,11 @@ import ( // RegionName is an alias for a region stored on the database. type RegionName string +// String implements fmt.Stringer. +func (r RegionName) String() string { + return string(r) +} + // RegionNames is an alias for a slice of regions. type RegionNames []RegionName @@ -31,16 +36,16 @@ func (regions RegionNames) ToStrings() []string { } // TelemetryName returns the name to use for the given locality. -func (cfg *TableDescriptor_LocalityConfig) TelemetryName() (string, error) { +func (cfg *LocalityConfig) TelemetryName() (string, error) { switch l := cfg.Locality.(type) { - case *TableDescriptor_LocalityConfig_Global_: + case *LocalityConfig_Global_: return tree.TelemetryNameGlobal, nil - case *TableDescriptor_LocalityConfig_RegionalByTable_: + case *LocalityConfig_RegionalByTable_: if l.RegionalByTable.Region != nil { return tree.TelemetryNameRegionalByTableIn, nil } return tree.TelemetryNameRegionalByTable, nil - case *TableDescriptor_LocalityConfig_RegionalByRow_: + case *LocalityConfig_RegionalByRow_: if l.RegionalByRow.As != nil { return tree.TelemetryNameRegionalByRowAs, nil } diff --git a/pkg/sql/catalog/colinfo/BUILD.bazel b/pkg/sql/catalog/colinfo/BUILD.bazel index 8fb6c4ae4925..ddcd60f3f12e 100644 --- a/pkg/sql/catalog/colinfo/BUILD.bazel +++ b/pkg/sql/catalog/colinfo/BUILD.bazel @@ -17,6 +17,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", diff --git a/pkg/sql/catalog/colinfo/col_type_info.go b/pkg/sql/catalog/colinfo/col_type_info.go index f188587c2309..74f4eaf5d3ee 100644 --- a/pkg/sql/catalog/colinfo/col_type_info.go +++ b/pkg/sql/catalog/colinfo/col_type_info.go @@ -14,7 +14,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -154,49 +153,3 @@ func MustBeValueEncoded(semanticType *types.T) bool { } return false } - -// GetColumnTypes populates the types of the columns with the given IDs into the -// outTypes slice, returning it. You must use the returned slice, as this -// function might allocate a new slice. 
-func GetColumnTypes( - desc catalog.TableDescriptor, columnIDs []descpb.ColumnID, outTypes []*types.T, -) ([]*types.T, error) { - if cap(outTypes) < len(columnIDs) { - outTypes = make([]*types.T, len(columnIDs)) - } else { - outTypes = outTypes[:len(columnIDs)] - } - for i, id := range columnIDs { - col, err := desc.FindColumnWithID(id) - if err != nil { - return nil, err - } - if !col.Public() { - return nil, fmt.Errorf("column-id \"%d\" does not exist", id) - } - outTypes[i] = col.GetType() - } - return outTypes, nil -} - -// GetColumnTypesFromColDescs populates the types of the columns with the given -// IDs into the outTypes slice, returning it. You must use the returned slice, -// as this function might allocate a new slice. -func GetColumnTypesFromColDescs( - cols []catalog.Column, columnIDs []descpb.ColumnID, outTypes []*types.T, -) []*types.T { - if cap(outTypes) < len(columnIDs) { - outTypes = make([]*types.T, len(columnIDs)) - } else { - outTypes = outTypes[:len(columnIDs)] - } - for i, id := range columnIDs { - for j := range cols { - if id == cols[j].GetID() { - outTypes[i] = cols[j].GetType() - break - } - } - } - return outTypes -} diff --git a/pkg/sql/catalog/colinfo/system_columns.go b/pkg/sql/catalog/colinfo/system_columns.go index 5fb2ec979386..01296f11e422 100644 --- a/pkg/sql/catalog/colinfo/system_columns.go +++ b/pkg/sql/catalog/colinfo/system_columns.go @@ -14,6 +14,7 @@ import ( "math" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/types" ) @@ -62,7 +63,7 @@ var MVCCTimestampColumnDesc = descpb.ColumnDescriptor{ Type: MVCCTimestampColumnType, Hidden: true, Nullable: true, - SystemColumnKind: descpb.SystemColumnKind_MVCCTIMESTAMP, + SystemColumnKind: catpb.SystemColumnKind_MVCCTIMESTAMP, ID: MVCCTimestampColumnID, } @@ -78,7 +79,7 @@ var TableOIDColumnDesc = descpb.ColumnDescriptor{ Type: types.Oid, Hidden: true, Nullable: true, - SystemColumnKind: descpb.SystemColumnKind_TABLEOID, + SystemColumnKind: catpb.SystemColumnKind_TABLEOID, ID: TableOIDColumnID, } @@ -87,18 +88,18 @@ const TableOIDColumnName = "tableoid" // IsColIDSystemColumn returns whether a column ID refers to a system column. func IsColIDSystemColumn(colID descpb.ColumnID) bool { - return GetSystemColumnKindFromColumnID(colID) != descpb.SystemColumnKind_NONE + return GetSystemColumnKindFromColumnID(colID) != catpb.SystemColumnKind_NONE } // GetSystemColumnKindFromColumnID returns the kind of system column that colID // refers to. 
-func GetSystemColumnKindFromColumnID(colID descpb.ColumnID) descpb.SystemColumnKind { +func GetSystemColumnKindFromColumnID(colID descpb.ColumnID) catpb.SystemColumnKind { for i := range AllSystemColumnDescs { if AllSystemColumnDescs[i].ID == colID { return AllSystemColumnDescs[i].SystemColumnKind } } - return descpb.SystemColumnKind_NONE + return catpb.SystemColumnKind_NONE } // IsSystemColumnName returns whether or not a name is a reserved system diff --git a/pkg/sql/catalog/dbdesc/BUILD.bazel b/pkg/sql/catalog/dbdesc/BUILD.bazel index 1f09b27b09cc..f43972d80604 100644 --- a/pkg/sql/catalog/dbdesc/BUILD.bazel +++ b/pkg/sql/catalog/dbdesc/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//pkg/keys", "//pkg/security", "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/multiregion", diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go index e252e3451074..446ec1b9e825 100644 --- a/pkg/sql/catalog/dbdesc/database_desc.go +++ b/pkg/sql/catalog/dbdesc/database_desc.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" @@ -154,7 +155,7 @@ func (desc *immutable) IsMultiRegion() bool { } // PrimaryRegionName implements the DatabaseDescriptor interface. -func (desc *immutable) PrimaryRegionName() (descpb.RegionName, error) { +func (desc *immutable) PrimaryRegionName() (catpb.RegionName, error) { if !desc.IsMultiRegion() { return "", errors.AssertionFailedf( "can not get the primary region of a non multi-region database") diff --git a/pkg/sql/catalog/descpb/BUILD.bazel b/pkg/sql/catalog/descpb/BUILD.bazel index 15ebdddd232d..a2d80132e529 100644 --- a/pkg/sql/catalog/descpb/BUILD.bazel +++ b/pkg/sql/catalog/descpb/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "index.go", "join_type.go", "locking.go", - "multiregion.go", "privilege.go", "structured.go", ":gen-formatversion-stringer", # keep @@ -26,11 +25,13 @@ go_library( "//pkg/keys", "//pkg/security", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/privilege", "//pkg/sql/protoreflect", + "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util", @@ -74,6 +75,7 @@ proto_library( deps = [ "//pkg/geo/geoindex:geoindex_proto", "//pkg/roachpb:roachpb_proto", + "//pkg/sql/catalog/catpb:catpb_proto", "//pkg/sql/types:types_proto", "//pkg/util/hlc:hlc_proto", "@com_github_gogo_protobuf//gogoproto:gogo_proto", @@ -89,6 +91,7 @@ go_proto_library( deps = [ "//pkg/geo/geoindex", "//pkg/roachpb:with-mocks", # keep + "//pkg/sql/catalog/catpb", "//pkg/sql/types", "//pkg/util/hlc", "@com_github_gogo_protobuf//gogoproto", diff --git a/pkg/sql/catalog/descpb/constraint.go b/pkg/sql/catalog/descpb/constraint.go index 155654565c74..8a825b97afc7 100644 --- a/pkg/sql/catalog/descpb/constraint.go +++ b/pkg/sql/catalog/descpb/constraint.go @@ -13,6 +13,7 @@ package descpb import ( "strconv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) @@ -50,37 +51,21 @@ func (x ForeignKeyReference_Match) String() string { // ForeignKeyReferenceActionType allows the conversion between 
a // tree.ReferenceAction and a ForeignKeyReference_Action. var ForeignKeyReferenceActionType = [...]tree.ReferenceAction{ - ForeignKeyReference_NO_ACTION: tree.NoAction, - ForeignKeyReference_RESTRICT: tree.Restrict, - ForeignKeyReference_SET_DEFAULT: tree.SetDefault, - ForeignKeyReference_SET_NULL: tree.SetNull, - ForeignKeyReference_CASCADE: tree.Cascade, + catpb.ForeignKeyAction_NO_ACTION: tree.NoAction, + catpb.ForeignKeyAction_RESTRICT: tree.Restrict, + catpb.ForeignKeyAction_SET_DEFAULT: tree.SetDefault, + catpb.ForeignKeyAction_SET_NULL: tree.SetNull, + catpb.ForeignKeyAction_CASCADE: tree.Cascade, } // ForeignKeyReferenceActionValue allows the conversion between a -// ForeignKeyReference_Action and a tree.ReferenceAction. -var ForeignKeyReferenceActionValue = [...]ForeignKeyReference_Action{ - tree.NoAction: ForeignKeyReference_NO_ACTION, - tree.Restrict: ForeignKeyReference_RESTRICT, - tree.SetDefault: ForeignKeyReference_SET_DEFAULT, - tree.SetNull: ForeignKeyReference_SET_NULL, - tree.Cascade: ForeignKeyReference_CASCADE, -} - -// String implements the fmt.Stringer interface. -func (x ForeignKeyReference_Action) String() string { - switch x { - case ForeignKeyReference_RESTRICT: - return "RESTRICT" - case ForeignKeyReference_SET_DEFAULT: - return "SET DEFAULT" - case ForeignKeyReference_SET_NULL: - return "SET NULL" - case ForeignKeyReference_CASCADE: - return "CASCADE" - default: - return strconv.Itoa(int(x)) - } +// catpb.ForeignKeyAction_Action and a tree.ReferenceAction. +var ForeignKeyReferenceActionValue = [...]catpb.ForeignKeyAction{ + tree.NoAction: catpb.ForeignKeyAction_NO_ACTION, + tree.Restrict: catpb.ForeignKeyAction_RESTRICT, + tree.SetDefault: catpb.ForeignKeyAction_SET_DEFAULT, + tree.SetNull: catpb.ForeignKeyAction_SET_NULL, + tree.Cascade: catpb.ForeignKeyAction_CASCADE, } // ConstraintType is used to identify the type of a constraint. diff --git a/pkg/sql/catalog/descpb/structured.go b/pkg/sql/catalog/descpb/structured.go index 41737e0526cf..daa33d5fa900 100644 --- a/pkg/sql/catalog/descpb/structured.go +++ b/pkg/sql/catalog/descpb/structured.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -37,10 +38,7 @@ func (dir IndexDescriptor_Direction) ToEncodingDirection() (encoding.Direction, // another is expected. // ID is a custom type for {Database,Table}Descriptor IDs. -type ID tree.ID - -// SafeValue implements the redact.SafeValue interface. -func (ID) SafeValue() {} +type ID = catid.DescID // InvalidID is the uninitialised descriptor id. const InvalidID ID = 0 @@ -71,16 +69,10 @@ const ( ) // FamilyID is a custom type for ColumnFamilyDescriptor IDs. -type FamilyID uint32 - -// SafeValue implements the redact.SafeValue interface. -func (FamilyID) SafeValue() {} +type FamilyID = catid.FamilyID // IndexID is a custom type for IndexDescriptor IDs. -type IndexID tree.IndexID - -// SafeValue implements the redact.SafeValue interface. -func (IndexID) SafeValue() {} +type IndexID = catid.IndexID // DescriptorVersion is a custom type for TableDescriptor Versions. type DescriptorVersion uint64 @@ -135,10 +127,7 @@ const ( ) // ColumnID is a custom type for ColumnDescriptor IDs. 
-type ColumnID tree.ColumnID - -// SafeValue implements the redact.SafeValue interface. -func (ColumnID) SafeValue() {} +type ColumnID = catid.ColumnID // ColumnIDs is a slice of ColumnDescriptor IDs. type ColumnIDs []ColumnID diff --git a/pkg/sql/catalog/descpb/structured.proto b/pkg/sql/catalog/descpb/structured.proto index 4c7854a815a0..29f7deb8bd81 100644 --- a/pkg/sql/catalog/descpb/structured.proto +++ b/pkg/sql/catalog/descpb/structured.proto @@ -14,6 +14,7 @@ package cockroach.sql.sqlbase; option go_package = "descpb"; import "util/hlc/timestamp.proto"; +import "sql/catalog/catpb/catalog.proto"; import "sql/catalog/descpb/privilege.proto"; import "sql/types/types.proto"; import "geo/geoindex/config.proto"; @@ -42,14 +43,6 @@ enum ConstraintValidity { // traditional foreign key references. message ForeignKeyReference { option (gogoproto.equal) = true; - enum Action { - option (gogoproto.goproto_enum_stringer) = false; - NO_ACTION = 0; - RESTRICT = 1; - SET_NULL = 2; - SET_DEFAULT = 3; - CASCADE = 4; - } // Match is the algorithm used to compare composite keys. enum Match { @@ -66,8 +59,8 @@ message ForeignKeyReference { // If this FK only uses a prefix of the columns in its index, we record how // many to avoid spuriously counting the additional cols as used by this FK. optional int32 shared_prefix_len = 5 [(gogoproto.nullable) = false]; - optional Action on_delete = 6 [(gogoproto.nullable) = false]; - optional Action on_update = 7 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.ForeignKeyAction on_delete = 6 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.ForeignKeyAction on_update = 7 [(gogoproto.nullable) = false]; // This is only important for composite keys. For all prior matches before // the addition of this value, MATCH SIMPLE will be used. optional Match match = 8 [(gogoproto.nullable) = false]; @@ -92,8 +85,8 @@ message ForeignKeyConstraint { (gogoproto.casttype) = "ID"]; optional string name = 5 [(gogoproto.nullable) = false]; optional ConstraintValidity validity = 6 [(gogoproto.nullable) = false]; - optional ForeignKeyReference.Action on_delete = 7 [(gogoproto.nullable) = false]; - optional ForeignKeyReference.Action on_update = 8 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.ForeignKeyAction on_delete = 7 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.ForeignKeyAction on_update = 8 [(gogoproto.nullable) = false]; // This is only important for composite keys. For all prior matches before // the addition of this value, MATCH SIMPLE will be used. optional ForeignKeyReference.Match match = 9 [(gogoproto.nullable) = false]; @@ -164,7 +157,7 @@ message ColumnDescriptor { // If the column is created without using GENERATED ... AS IDENTITY syntax, // GeneratedAsIdentityType for this column will be set to the default // NOT_IDENTITY_COLUMN. - optional GeneratedAsIdentityType generated_as_identity_type = 19 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.GeneratedAsIdentityType generated_as_identity_type = 19 [(gogoproto.nullable) = false]; // Expression to specify the sequence option for a `GENERATED AS IDENTITY` // column. @@ -199,34 +192,7 @@ message ColumnDescriptor { // SystemColumnKind represents what kind of system column this column // descriptor represents, if any. 
- optional SystemColumnKind system_column_kind = 15 [(gogoproto.nullable) = false]; -} - -// SystemColumnKind is an enum representing the different kind of system -// columns that can be synthesized by the execution engine. -enum SystemColumnKind { - // Default value, unused. - NONE = 0; - // A system column containing the value of the MVCC timestamp associated - // with the kv's corresponding to the row. - MVCCTIMESTAMP = 1; - // A system column containing the OID of the table that the row came from. - TABLEOID = 2; -} - -// GeneratedAsIdentityType is an enum representing how the creation of -// a column is associated with the GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY -// syntax. -enum GeneratedAsIdentityType { - // A column created without `GENERATED ... AS IDENTITY` syntax. - NOT_IDENTITY_COLUMN = 0; - // A column created with `GENERATED ALWAYS AS IDENTITY` syntax. - // Such a column does not allow override without `OVERRIDING SYSTEM VALUE` - // syntax. - GENERATED_ALWAYS = 1; - // A column created with `GENERATED BY DEFAULT AS IDENTITY` syntax. - // Such a column can be overridden without `OVERRIDING SYSTEM VALUE` syntax. - GENERATED_BY_DEFAULT = 2; + optional cockroach.sql.catalog.catpb.SystemColumnKind system_column_kind = 15 [(gogoproto.nullable) = false]; } // ColumnFamilyDescriptor is set of columns stored together in one kv entry. @@ -299,37 +265,6 @@ message InterleaveDescriptor { repeated Ancestor ancestors = 1 [(gogoproto.nullable) = false]; } -// ShardedDescriptor represents an index (either primary or secondary) that is hash -// sharded into a user-specified number of buckets. -// -// As as example, sample field values for the following table: -// -// CREATE TABLE abc ( -// a INT PRIMARY KEY USING HASH WITH BUCKET_COUNT=10, // column id: 1 -// b BYTES -// ); -// -// Sharded descriptor: -// name: "a_shard" -// shard_buckets: 10 -// column_names: ["a"] -message ShardedDescriptor { - option (gogoproto.equal) = true; - - // IsSharded indicates whether the index in question is a sharded one. - optional bool is_sharded = 1 [(gogoproto.nullable) = false]; - // Name is the name of the shard column. - optional string name = 2 [(gogoproto.nullable) = false]; - - // ShardBuckets indicates the number of shards this index is divided into. - optional int32 shard_buckets = 3 [(gogoproto.nullable) = false, - (gogoproto.customname) = "ShardBuckets"]; - - // ColumnNames lists the names of the columns used to compute the shard column's - // values. - repeated string column_names = 4; -} - // PartitioningDescriptor represents the partitioning of an index into spans // of keys addressable by a zone config. The key encoding is unchanged. Each // partition may optionally be itself divided into further partitions, called @@ -546,7 +481,7 @@ message IndexDescriptor { [(gogoproto.nullable) = false, (gogoproto.casttype) = "IndexDescriptorEncodingType"]; // Sharded, if it's not the zero value, describes how this index is sharded. - optional ShardedDescriptor sharded = 20 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.ShardedDescriptor sharded = 20 [(gogoproto.nullable) = false]; // Disabled is used by the DROP PRIMARY KEY command to mark // that this index is disabled for further use. 
@@ -634,8 +569,8 @@ message PrimaryKeySwap { message LocalityConfigSwap { option (gogoproto.equal) = true; - optional TableDescriptor.LocalityConfig old_locality_config = 1 [(gogoproto.nullable) = false]; - optional TableDescriptor.LocalityConfig new_locality_config = 2 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.LocalityConfig old_locality_config = 1 [(gogoproto.nullable) = false]; + optional cockroach.sql.catalog.catpb.LocalityConfig new_locality_config = 2 [(gogoproto.nullable) = false]; // NewRegionalByRowColumnID is set when we are creating a new column for a // REGIONAL BY ROW table. It is used by NewRegionalByRowColumnDefaultExpr @@ -1110,7 +1045,7 @@ message TableDescriptor { } optional SequenceOwner sequence_owner = 6 [(gogoproto.nullable) = false]; - + // The number of values (which have already been created in KV) // that a node can cache locally. optional int64 cache_size = 7 [(gogoproto.nullable) = false]; @@ -1209,42 +1144,7 @@ message TableDescriptor { // the table is persistent. optional bool temporary = 39 [(gogoproto.nullable) = false]; - message LocalityConfig { - option (gogoproto.equal) = true; - // REGIONAL BY TABLE tables have an "implicit" bidirectional dependency with - // the multi-region enum. The dependency is described "implicit" because - // even though no column on the table uses the multi-region type descriptor - // to store the homing region, a value from the type descriptor is stored in - // the locality config below (when the table is homed in the non-primary - // region). - // This changes how type dependencies are constructed for table descriptors. - // After the introduction of REGIONAL BY TABLE tables, a column on the table - // descriptor using a type is no longer a necessary (note it is still a - // sufficient) condition to establish a type dependency. As is the case with - // adding and dropping columns, this type dependency must be negotiated. As - // such, switching locality patterns or adding new locality configs must be - // done so that back references to the multi-region type descriptor are - // kept sane. - message RegionalByTable { - option (gogoproto.equal) = true; - // Region is set if the table has an affinity with a non-primary region. - optional string region = 1 [(gogoproto.casttype)="RegionName"]; - } - message RegionalByRow { - option (gogoproto.equal) = true; - // As is set if the table has a REGIONAL BY ROW AS ... set to a specific column. - optional string as = 1; - } - message Global { - option (gogoproto.equal) = true; - } - oneof locality { - Global global = 1; - RegionalByTable regional_by_table = 2; - RegionalByRow regional_by_row = 3; - } - } - optional LocalityConfig locality_config = 42; + optional cockroach.sql.catalog.catpb.LocalityConfig locality_config = 42; // PartitionAllBy is set if PARTITION ALL BY is set on the table. // This means that all indexes implicitly inherit all partitioning @@ -1317,7 +1217,7 @@ message DatabaseDescriptor { reserved 1; optional SurvivalGoal survival_goal = 2 [(gogoproto.nullable)=false]; - optional string primary_region = 3 [(gogoproto.nullable)=false,(gogoproto.casttype)="RegionName"]; + optional string primary_region = 3 [(gogoproto.nullable)=false,(gogoproto.casttype)="github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.RegionName"]; // RegionEnumID represents ID of the type descriptor corresponding to the // region enum for a multi-region database. 
If the database is not a @@ -1450,7 +1350,7 @@ message TypeDescriptor { option (gogoproto.equal) = true; // PrimaryRegion represents the PrimaryRegion for a multi-region enum. - optional string primary_region = 1 [(gogoproto.nullable) = false, (gogoproto.casttype)="RegionName"]; + optional string primary_region = 1 [(gogoproto.nullable) = false, (gogoproto.casttype)="github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.RegionName"]; } optional RegionConfig region_config = 16; diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index 7a0faf28e813..2473b192e7fc 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -215,7 +216,7 @@ type DatabaseDescriptor interface { // configured. If so, GetRegionConfig can be used. IsMultiRegion() bool // PrimaryRegionName returns the primary region for a multi-region database. - PrimaryRegionName() (descpb.RegionName, error) + PrimaryRegionName() (catpb.RegionName, error) // MultiRegionEnumID returns the ID of the multi-region enum if the database // is a multi-region database, and an error otherwise. MultiRegionEnumID() (descpb.ID, error) @@ -459,6 +460,30 @@ type TableDescriptor interface { // colinfo.AllSystemColumnDescs. SystemColumns() []Column + // IndexColumns returns a slice of Column interfaces containing all + // columns present in the specified Index in any capacity. + IndexColumns(idx Index) []Column + // IndexKeyColumns returns a slice of Column interfaces containing all + // key columns in the specified Index. + IndexKeyColumns(idx Index) []Column + // IndexKeyColumnDirections returns a slice of column directions for all + // key columns in the specified Index. + IndexKeyColumnDirections(idx Index) []descpb.IndexDescriptor_Direction + // IndexKeySuffixColumns returns a slice of Column interfaces containing all + // key suffix columns in the specified Index. + IndexKeySuffixColumns(idx Index) []Column + // IndexFullColumns returns a slice of Column interfaces containing all + // key columns in the specified Index, plus all key suffix columns if that + // index is not a unique index. + IndexFullColumns(idx Index) []Column + // IndexFullColumnDirections returns a slice of column directions for all + // key columns in the specified Index, plus all key suffix columns if that + // index is not a unique index. + IndexFullColumnDirections(idx Index) []descpb.IndexDescriptor_Direction + // IndexStoredColumns returns a slice of Column interfaces containing all + // stored columns in the specified Index. + IndexStoredColumns(idx Index) []Column + // FindColumnWithID returns the first column found whose ID matches the // provided target ID, in the canonical order. // If no column is found then an error is also returned. @@ -585,7 +610,7 @@ type TableDescriptor interface { // GetLocalityConfig returns the locality config for this table, which // describes the table's multi-region locality policy if one is set (e.g. // GLOBAL or REGIONAL BY ROW). - GetLocalityConfig() *descpb.TableDescriptor_LocalityConfig + GetLocalityConfig() *catpb.LocalityConfig // IsLocalityRegionalByRow returns true if the table is REGIONAL BY ROW. 
IsLocalityRegionalByRow() bool // IsLocalityRegionalByTable returns true if the table is REGIONAL BY TABLE. @@ -594,7 +619,7 @@ type TableDescriptor interface { IsLocalityGlobal() bool // GetRegionalByTableRegion returns the region a REGIONAL BY TABLE table is // homed in. - GetRegionalByTableRegion() (descpb.RegionName, error) + GetRegionalByTableRegion() (catpb.RegionName, error) // GetRegionalByRowTableRegionColumnName returns the region column name of a // REGIONAL BY ROW table. GetRegionalByRowTableRegionColumnName() (tree.Name, error) @@ -636,13 +661,13 @@ type TypeDescriptor interface { // The following fields are only valid for multi-region enum types. // PrimaryRegionName returns the primary region for a multi-region enum. - PrimaryRegionName() (descpb.RegionName, error) + PrimaryRegionName() (catpb.RegionName, error) // RegionNames returns all `PUBLIC` regions on the multi-region enum. Regions // that are in the process of being added/removed (`READ_ONLY`) are omitted. - RegionNames() (descpb.RegionNames, error) + RegionNames() (catpb.RegionNames, error) // RegionNamesIncludingTransitioning returns all the regions on a multi-region // enum, including `READ ONLY` regions which are in the process of transitioning. - RegionNamesIncludingTransitioning() (descpb.RegionNames, error) + RegionNamesIncludingTransitioning() (catpb.RegionNames, error) // RegionNamesForValidation returns all regions on the multi-region // enum to make validation with the public zone configs and partitons // possible. @@ -652,10 +677,10 @@ type TypeDescriptor interface { // being dropped (since they will not be dropped from the zone configuration // until they are fully removed from the type descriptor, again, at the end // of the transaction). - RegionNamesForValidation() (descpb.RegionNames, error) + RegionNamesForValidation() (catpb.RegionNames, error) // TransitioningRegionNames returns regions which are transitioning to PUBLIC // or are being removed. - TransitioningRegionNames() (descpb.RegionNames, error) + TransitioningRegionNames() (catpb.RegionNames, error) // The following fields are set if the type is an enum or a multi-region enum. diff --git a/pkg/sql/catalog/descs/dist_sql_type_resolver.go b/pkg/sql/catalog/descs/dist_sql_type_resolver.go index d37a5a447f35..3f33695c9c41 100644 --- a/pkg/sql/catalog/descs/dist_sql_type_resolver.go +++ b/pkg/sql/catalog/descs/dist_sql_type_resolver.go @@ -25,36 +25,6 @@ import ( "github.com/lib/pq/oid" ) -// DistSQLTypeResolverFactory is an object that constructs TypeResolver objects -// that are bound under a transaction. These TypeResolvers access descriptors -// through the descs.Collection and eventually the lease.Manager. It cannot be -// used concurrently, and neither can the constructed TypeResolvers. After the -// DistSQLTypeResolverFactory is finished being used, all descriptors need to -// be released from Descriptors. It is intended to be used to resolve type -// references during the initialization of DistSQL flows. -type DistSQLTypeResolverFactory struct { - Descriptors *Collection - CleanupFunc func(ctx context.Context) -} - -// NewTypeResolver creates a new TypeResolver that is bound under the input -// transaction. It returns a nil resolver if the factory itself is nil. 
-func (df *DistSQLTypeResolverFactory) NewTypeResolver(txn *kv.Txn) DistSQLTypeResolver { - if df == nil { - return DistSQLTypeResolver{} - } - return NewDistSQLTypeResolver(df.Descriptors, txn) -} - -// NewSemaContext creates a new SemaContext with a TypeResolver bound to the -// input transaction. -func (df *DistSQLTypeResolverFactory) NewSemaContext(txn *kv.Txn) *tree.SemaContext { - resolver := df.NewTypeResolver(txn) - semaCtx := tree.MakeSemaContext() - semaCtx.TypeResolver = &resolver - return &semaCtx -} - // DistSQLTypeResolver is a TypeResolver that accesses TypeDescriptors through // a given descs.Collection and transaction. type DistSQLTypeResolver struct { diff --git a/pkg/sql/catalog/descs/leased_descriptors.go b/pkg/sql/catalog/descs/leased_descriptors.go index 6f6b8c37b59e..2ea5e1d72647 100644 --- a/pkg/sql/catalog/descs/leased_descriptors.go +++ b/pkg/sql/catalog/descs/leased_descriptors.go @@ -116,12 +116,8 @@ func (ld *leasedDescriptors) getByID( ctx context.Context, txn deadlineHolder, id descpb.ID, setTxnDeadline bool, ) (_ catalog.Descriptor, shouldReadFromStore bool, _ error) { // First, look to see if we already have the table in the shared cache. - if cached := ld.cache.GetByID(id); cached != nil { - if log.V(2) { - log.Eventf(ctx, "found descriptor in collection for (%d, %d, '%s'): %d", - cached.GetParentID(), cached.GetParentSchemaID(), cached.GetName(), id) - } - return cached.(lease.LeasedDescriptor).Underlying(), false, nil + if cached := ld.getCachedByID(ctx, id); cached != nil { + return cached, false, nil } if systemschema.IsUnleasableSystemDescriptorByID(id) { @@ -133,6 +129,18 @@ func (ld *leasedDescriptors) getByID( return ld.getResult(ctx, txn, setTxnDeadline, desc, err) } +func (ld *leasedDescriptors) getCachedByID(ctx context.Context, id descpb.ID) catalog.Descriptor { + cached := ld.cache.GetByID(id) + if cached == nil { + return nil + } + if log.V(2) { + log.Eventf(ctx, "found descriptor in collection for (%d, %d, '%s'): %d", + cached.GetParentID(), cached.GetParentSchemaID(), cached.GetName(), id) + } + return cached.(lease.LeasedDescriptor).Underlying() +} + // getResult is a helper to deal with the result that comes back from Acquire // or AcquireByName. func (ld *leasedDescriptors) getResult( diff --git a/pkg/sql/catalog/descs/table.go b/pkg/sql/catalog/descs/table.go index 4a6419c6ef08..1b16b552584e 100644 --- a/pkg/sql/catalog/descs/table.go +++ b/pkg/sql/catalog/descs/table.go @@ -58,6 +58,26 @@ func (tc *Collection) getTableByName( return true, desc.(catalog.TableDescriptor), nil } +// GetLeasedImmutableTableByID returns a leased immutable table descriptor by +// its ID. +func (tc *Collection) GetLeasedImmutableTableByID( + ctx context.Context, txn *kv.Txn, tableID descpb.ID, +) (catalog.TableDescriptor, error) { + desc, _, err := tc.leased.getByID(ctx, tc.deadlineHolder(txn), tableID, false /* setTxnDeadline */) + if err != nil || desc == nil { + return nil, err + } + table, err := catalog.AsTableDescriptor(desc) + if err != nil { + return nil, err + } + hydrated, err := tc.hydrateTypesInTableDesc(ctx, txn, table) + if err != nil { + return nil, err + } + return hydrated, nil +} + // GetUncommittedMutableTableByID returns an uncommitted mutable table by its // ID. 
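For illustration only, a minimal caller sketch for the new `Collection.GetLeasedImmutableTableByID` added above; the wrapper function below is hypothetical and not part of this change, and it simply forwards the lease-cache lookup:

```
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
)

// leasedTable is a hypothetical wrapper: it returns the leased, type-hydrated
// descriptor for tableID, or nil (with a nil error) when no leased copy is
// available and the caller would have to read the descriptor from the store.
func leasedTable(
	ctx context.Context, txn *kv.Txn, tc *descs.Collection, tableID descpb.ID,
) (catalog.TableDescriptor, error) {
	return tc.GetLeasedImmutableTableByID(ctx, txn, tableID)
}
```
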
func (tc *Collection) GetUncommittedMutableTableByID(id descpb.ID) (*tabledesc.Mutable, error) { diff --git a/pkg/sql/catalog/multiregion/BUILD.bazel b/pkg/sql/catalog/multiregion/BUILD.bazel index bdd751353be8..d75147a647ca 100644 --- a/pkg/sql/catalog/multiregion/BUILD.bazel +++ b/pkg/sql/catalog/multiregion/BUILD.bazel @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", @@ -23,6 +24,7 @@ go_test( srcs = ["region_config_test.go"], deps = [ ":multiregion", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/testutils", "//pkg/util/leaktest", diff --git a/pkg/sql/catalog/multiregion/region_config.go b/pkg/sql/catalog/multiregion/region_config.go index 540df4deba6a..550291ea543a 100644 --- a/pkg/sql/catalog/multiregion/region_config.go +++ b/pkg/sql/catalog/multiregion/region_config.go @@ -13,6 +13,7 @@ package multiregion import ( + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -30,9 +31,9 @@ const minNumRegionsForSurviveRegionGoal = 3 // RegionConfig must be synthesized to pick up those changes. type RegionConfig struct { survivalGoal descpb.SurvivalGoal - regions descpb.RegionNames - transitioningRegions descpb.RegionNames - primaryRegion descpb.RegionName + regions catpb.RegionNames + transitioningRegions catpb.RegionNames + primaryRegion catpb.RegionName regionEnumID descpb.ID placement descpb.DataPlacement } @@ -43,12 +44,12 @@ func (r *RegionConfig) SurvivalGoal() descpb.SurvivalGoal { } // PrimaryRegion returns the primary region configured on the RegionConfig. -func (r *RegionConfig) PrimaryRegion() descpb.RegionName { +func (r *RegionConfig) PrimaryRegion() catpb.RegionName { return r.primaryRegion } // Regions returns the list of regions added to the RegionConfig. -func (r *RegionConfig) Regions() descpb.RegionNames { +func (r *RegionConfig) Regions() catpb.RegionNames { return r.regions } @@ -69,7 +70,7 @@ func (r RegionConfig) PrimaryRegionString() string { // TransitioningRegions returns all the regions which are currently transitioning // from or to being PUBLIC. -func (r RegionConfig) TransitioningRegions() descpb.RegionNames { +func (r RegionConfig) TransitioningRegions() catpb.RegionNames { return r.transitioningRegions } @@ -94,7 +95,7 @@ type MakeRegionConfigOption func(r *RegionConfig) // WithTransitioningRegions is an option to include transitioning // regions into MakeRegionConfig. -func WithTransitioningRegions(transitioningRegions descpb.RegionNames) MakeRegionConfigOption { +func WithTransitioningRegions(transitioningRegions catpb.RegionNames) MakeRegionConfigOption { return func(r *RegionConfig) { r.transitioningRegions = transitioningRegions } @@ -102,8 +103,8 @@ func WithTransitioningRegions(transitioningRegions descpb.RegionNames) MakeRegio // MakeRegionConfig constructs a RegionConfig. 
func MakeRegionConfig( - regions descpb.RegionNames, - primaryRegion descpb.RegionName, + regions catpb.RegionNames, + primaryRegion catpb.RegionName, survivalGoal descpb.SurvivalGoal, regionEnumID descpb.ID, placement descpb.DataPlacement, diff --git a/pkg/sql/catalog/multiregion/region_config_test.go b/pkg/sql/catalog/multiregion/region_config_test.go index 82fbaf45b36d..c1875b2a0601 100644 --- a/pkg/sql/catalog/multiregion/region_config_test.go +++ b/pkg/sql/catalog/multiregion/region_config_test.go @@ -13,6 +13,7 @@ package multiregion_test import ( "testing" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -31,7 +32,7 @@ func TestValidateRegionConfig(t *testing.T) { }{ { err: "expected a valid multi-region enum ID", - regionConfig: multiregion.MakeRegionConfig(descpb.RegionNames{ + regionConfig: multiregion.MakeRegionConfig(catpb.RegionNames{ "region_a", "region_b", }, @@ -43,7 +44,7 @@ func TestValidateRegionConfig(t *testing.T) { }, { err: "3 regions are required for surviving a region failure", - regionConfig: multiregion.MakeRegionConfig(descpb.RegionNames{ + regionConfig: multiregion.MakeRegionConfig(catpb.RegionNames{ "region_a", "region_b", }, @@ -56,7 +57,7 @@ func TestValidateRegionConfig(t *testing.T) { { err: "expected > 0 number of regions in the region config", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{}, + catpb.RegionNames{}, "region_b", descpb.SurvivalGoal_REGION_FAILURE, validRegionEnumID, @@ -66,7 +67,7 @@ func TestValidateRegionConfig(t *testing.T) { { err: "cannot have a database with restricted placement that is also region survivable", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{"region_a", "region_b", "region_c"}, + catpb.RegionNames{"region_a", "region_b", "region_c"}, "region_b", descpb.SurvivalGoal_REGION_FAILURE, validRegionEnumID, diff --git a/pkg/sql/catalog/multiregion/validate_table.go b/pkg/sql/catalog/multiregion/validate_table.go index 4b7fb676d06c..82137bc183af 100644 --- a/pkg/sql/catalog/multiregion/validate_table.go +++ b/pkg/sql/catalog/multiregion/validate_table.go @@ -14,7 +14,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -114,7 +114,7 @@ func ValidateTableLocalityConfig( } columnTypesTypeIDs := catalog.MakeDescriptorIDSet(typeIDsReferencedByColumns...) 
switch lc := lc.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_Global_: if regionEnumIDReferenced { if !columnTypesTypeIDs.Contains(regionsEnumID) { return errors.AssertionFailedf( @@ -125,7 +125,7 @@ func ValidateTableLocalityConfig( ) } } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: if !desc.IsPartitionAllBy() { return errors.AssertionFailedf("expected REGIONAL BY ROW table to have PartitionAllBy set") } @@ -138,7 +138,7 @@ func ValidateTableLocalityConfig( if err != nil { return err } - regionNames := make(map[descpb.RegionName]struct{}, len(regions)) + regionNames := make(map[catpb.RegionName]struct{}, len(regions)) for _, region := range regions { regionNames[region] = struct{}{} } @@ -146,14 +146,14 @@ func ValidateTableLocalityConfig( if err != nil { return err } - transitioningRegionNames := make(map[descpb.RegionName]struct{}, len(regions)) + transitioningRegionNames := make(map[catpb.RegionName]struct{}, len(regions)) for _, region := range transitioningRegions { transitioningRegionNames[region] = struct{}{} } part := desc.GetPrimaryIndex().GetPartitioning() err = part.ForEachList(func(name string, _ [][]byte, _ catalog.Partitioning) error { - regionName := descpb.RegionName(name) + regionName := catpb.RegionName(name) // Any transitioning region names may exist. if _, ok := transitioningRegionNames[regionName]; ok { return nil @@ -183,7 +183,7 @@ func ValidateTableLocalityConfig( ) } - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByTable_: // Table is homed in an explicit (non-primary) region. if lc.RegionalByTable.Region != nil { @@ -244,11 +244,11 @@ func ValidateTableLocalityConfig( } // FormatTableLocalityConfig formats the table locality. 
-func FormatTableLocalityConfig(c *descpb.TableDescriptor_LocalityConfig, f *tree.FmtCtx) error { +func FormatTableLocalityConfig(c *catpb.LocalityConfig, f *tree.FmtCtx) error { switch v := c.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_Global_: f.WriteString("GLOBAL") - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByTable_: f.WriteString("REGIONAL BY TABLE IN ") if v.RegionalByTable.Region != nil { region := tree.Name(*v.RegionalByTable.Region) @@ -256,7 +256,7 @@ func FormatTableLocalityConfig(c *descpb.TableDescriptor_LocalityConfig, f *tree } else { f.WriteString("PRIMARY REGION") } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: f.WriteString("REGIONAL BY ROW") if v.RegionalByRow.As != nil { f.WriteString(" AS ") diff --git a/pkg/sql/catalog/systemschema/BUILD.bazel b/pkg/sql/catalog/systemschema/BUILD.bazel index 566bfd309558..6079d0e76b03 100644 --- a/pkg/sql/catalog/systemschema/BUILD.bazel +++ b/pkg/sql/catalog/systemschema/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "//pkg/security", "//pkg/sql/catalog", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", diff --git a/pkg/sql/catalog/systemschema/system.go b/pkg/sql/catalog/systemschema/system.go index 5f4481c07c7c..574c4715f039 100644 --- a/pkg/sql/catalog/systemschema/system.go +++ b/pkg/sql/catalog/systemschema/system.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -518,11 +519,6 @@ CREATE TABLE system.statement_statistics ( metadata, statistics, plan - ), - CONSTRAINT check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8 CHECK ( - crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8 IN ( - 0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8 - ) ) ) ` @@ -554,11 +550,6 @@ CREATE TABLE system.transaction_statistics ( agg_interval, metadata, statistics - ), - CONSTRAINT check_crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 CHECK ( - crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8 IN ( - 0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8 - ) ) ); ` @@ -2006,7 +1997,7 @@ var ( }, KeyColumnIDs: []descpb.ColumnID{11, 1, 2, 3, 4, 5, 6}, Version: descpb.LatestNonPrimaryIndexDescriptorVersion, - Sharded: descpb.ShardedDescriptor{ + Sharded: catpb.ShardedDescriptor{ IsSharded: true, Name: "crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_hash_transaction_fingerprint_id_shard_8", ShardBuckets: 8, @@ -2106,7 +2097,7 @@ var ( }, KeyColumnIDs: []descpb.ColumnID{8, 1, 2, 3, 4}, Version: descpb.LatestNonPrimaryIndexDescriptorVersion, - Sharded: descpb.ShardedDescriptor{ + Sharded: catpb.ShardedDescriptor{ IsSharded: true, Name: "crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_shard_8", ShardBuckets: 8, diff --git a/pkg/sql/catalog/table_elements.go 
b/pkg/sql/catalog/table_elements.go index 88941cfe00d4..ac0a9696ed19 100644 --- a/pkg/sql/catalog/table_elements.go +++ b/pkg/sql/catalog/table_elements.go @@ -12,6 +12,7 @@ package catalog import ( "github.com/cockroachdb/cockroach/pkg/geo/geoindex" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -130,7 +131,7 @@ type Index interface { GetVersion() descpb.IndexDescriptorVersion GetEncodingType() descpb.IndexDescriptorEncodingType - GetSharded() descpb.ShardedDescriptor + GetSharded() catpb.ShardedDescriptor GetShardColumnName() string IsValidOriginIndex(originColIDs descpb.ColumnIDs) bool @@ -297,7 +298,7 @@ type Column interface { // if the column is created with `GENERATED BY DEFAULT AS IDENTITY` syntax, // it will return descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT; // otherwise, returns descpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN. - GetGeneratedAsIdentityType() descpb.GeneratedAsIdentityType + GetGeneratedAsIdentityType() catpb.GeneratedAsIdentityType // HasGeneratedAsIdentitySequenceOption returns true if there is a // customized sequence option when this column is created as a @@ -603,32 +604,6 @@ func FindDeleteOnlyNonPrimaryIndex(desc TableDescriptor, test func(idx Index) bo return findIndex(desc.DeleteOnlyNonPrimaryIndexes(), test) } -// FullIndexColumnIDs returns the index column IDs including any extra (implicit or -// stored (old STORING encoding)) column IDs for non-unique indexes. It also -// returns the direction with which each column was encoded. -func FullIndexColumnIDs(idx Index) ([]descpb.ColumnID, []descpb.IndexDescriptor_Direction) { - n := idx.NumKeyColumns() - if !idx.IsUnique() { - n += idx.NumKeySuffixColumns() - } - ids := make([]descpb.ColumnID, 0, n) - dirs := make([]descpb.IndexDescriptor_Direction, 0, n) - for i := 0; i < idx.NumKeyColumns(); i++ { - ids = append(ids, idx.GetKeyColumnID(i)) - dirs = append(dirs, idx.GetKeyColumnDirection(i)) - } - // Non-unique indexes have some of the primary-key columns appended to - // their key. - if !idx.IsUnique() { - for i := 0; i < idx.NumKeySuffixColumns(); i++ { - // Extra columns are encoded in ascending order. - ids = append(ids, idx.GetKeySuffixColumnID(i)) - dirs = append(dirs, descpb.IndexDescriptor_ASC) - } - } - return ids, dirs -} - // UserDefinedTypeColsHaveSameVersion returns whether one table descriptor's // columns with user defined type metadata have the same versions of metadata // as in the other descriptor. 
Note that this function is only valid on two diff --git a/pkg/sql/catalog/tabledesc/BUILD.bazel b/pkg/sql/catalog/tabledesc/BUILD.bazel index 5bcfdcd7fe21..db8cdd1de0f6 100644 --- a/pkg/sql/catalog/tabledesc/BUILD.bazel +++ b/pkg/sql/catalog/tabledesc/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/settings/cluster", "//pkg/sql/catalog", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", @@ -46,7 +47,6 @@ go_library( "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_lib_pq//oid", - "@org_golang_google_protobuf//proto", ], ) @@ -77,6 +77,7 @@ go_test( "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", "//pkg/sql/catalog/catconstants", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/typedesc", diff --git a/pkg/sql/catalog/tabledesc/column.go b/pkg/sql/catalog/tabledesc/column.go index 1a6ce7733b8e..18020b0ff03e 100644 --- a/pkg/sql/catalog/tabledesc/column.go +++ b/pkg/sql/catalog/tabledesc/column.go @@ -14,6 +14,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -200,25 +201,25 @@ func (w column) GetPGAttributeNum() uint32 { // IsSystemColumn returns true iff the column is a system column. func (w column) IsSystemColumn() bool { - return w.desc.SystemColumnKind != descpb.SystemColumnKind_NONE + return w.desc.SystemColumnKind != catpb.SystemColumnKind_NONE } // IsGeneratedAsIdentity returns true iff the column is created // with GENERATED {ALWAYS | BY DEFAULT} AS IDENTITY syntax. func (w column) IsGeneratedAsIdentity() bool { - return w.desc.GeneratedAsIdentityType != descpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN + return w.desc.GeneratedAsIdentityType != catpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN } // IsGeneratedAlwaysAsIdentity returns true iff the column is created // with GENERATED ALWAYS AS IDENTITY syntax. func (w column) IsGeneratedAlwaysAsIdentity() bool { - return w.desc.GeneratedAsIdentityType == descpb.GeneratedAsIdentityType_GENERATED_ALWAYS + return w.desc.GeneratedAsIdentityType == catpb.GeneratedAsIdentityType_GENERATED_ALWAYS } // IsGeneratedByDefaultAsIdentity returns true iff the column is created // with GENERATED BY DEFAULT AS IDENTITY syntax. func (w column) IsGeneratedByDefaultAsIdentity() bool { - return w.desc.GeneratedAsIdentityType == descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT + return w.desc.GeneratedAsIdentityType == catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT } // GetGeneratedAsIdentityType returns the type of how the column was @@ -228,7 +229,7 @@ func (w column) IsGeneratedByDefaultAsIdentity() bool { // if the column is created with `GENERATED BY DEFAULT AS IDENTITY` syntax, // it will return descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT; // otherwise, returns descpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN. 
-func (w column) GetGeneratedAsIdentityType() descpb.GeneratedAsIdentityType { +func (w column) GetGeneratedAsIdentityType() catpb.GeneratedAsIdentityType { return w.desc.GeneratedAsIdentityType } @@ -260,6 +261,18 @@ type columnCache struct { readable []catalog.Column withUDTs []catalog.Column system []catalog.Column + index []indexColumnCache +} + +type indexColumnCache struct { + all []catalog.Column + allDirs []descpb.IndexDescriptor_Direction + key []catalog.Column + keyDirs []descpb.IndexDescriptor_Direction + stored []catalog.Column + keySuffix []catalog.Column + full []catalog.Column + fullDirs []descpb.IndexDescriptor_Direction } // newColumnCache returns a fresh fully-populated columnCache struct for the @@ -293,7 +306,7 @@ func newColumnCache(desc *descpb.TableDescriptor, mutations *mutationCache) *col for i := range backingStructs[numPublic:] { c.all = append(c.all, &backingStructs[numPublic+i]) } - // Populate the remaining fields. + // Populate the remaining column slice fields. c.deletable = c.all[:numDeletable] c.system = c.all[numDeletable:] c.public = c.all[:numPublic] @@ -333,9 +346,58 @@ func newColumnCache(desc *descpb.TableDescriptor, mutations *mutationCache) *col lazyAllocAppendColumn(&c.withUDTs, col, numDeletable) } } + // Populate the per-index column cache + c.index = make([]indexColumnCache, 0, 1+len(desc.Indexes)+len(mutations.indexes)) + c.index = append(c.index, makeIndexColumnCache(&desc.PrimaryIndex, c.all)) + for i := range desc.Indexes { + c.index = append(c.index, makeIndexColumnCache(&desc.Indexes[i], c.all)) + } + for i := range mutations.indexes { + c.index = append(c.index, makeIndexColumnCache(mutations.indexes[i].AsIndex().IndexDesc(), c.all)) + } return &c } +// makeIndexColumnCache builds a cache of catalog.Column slices pertaining to +// the columns referenced in an index. +func makeIndexColumnCache(idx *descpb.IndexDescriptor, all []catalog.Column) (ic indexColumnCache) { + nKey := len(idx.KeyColumnIDs) + nKeySuffix := len(idx.KeySuffixColumnIDs) + nStored := len(idx.StoreColumnIDs) + nAll := nKey + nKeySuffix + nStored + ic.allDirs = make([]descpb.IndexDescriptor_Direction, nAll) + // Only copy key column directions, others will remain at ASC (default value). 
+ copy(ic.allDirs, idx.KeyColumnDirections) + ic.all = make([]catalog.Column, 0, nAll) + appendColumnsByID(&ic.all, all, idx.KeyColumnIDs) + appendColumnsByID(&ic.all, all, idx.KeySuffixColumnIDs) + appendColumnsByID(&ic.all, all, idx.StoreColumnIDs) + ic.key = ic.all[:nKey] + ic.keyDirs = ic.allDirs[:nKey] + ic.keySuffix = ic.all[nKey : nKey+nKeySuffix] + ic.stored = ic.all[nKey+nKeySuffix:] + nFull := nKey + if !idx.Unique { + nFull = nFull + nKeySuffix + } + ic.full = ic.all[:nFull] + ic.fullDirs = ic.allDirs[:nFull] + return ic +} + +func appendColumnsByID(slice *[]catalog.Column, source []catalog.Column, ids []descpb.ColumnID) { + for _, id := range ids { + var col catalog.Column + for _, candidate := range source { + if candidate.GetID() == id { + col = candidate + break + } + } + *slice = append(*slice, col) + } +} + func lazyAllocAppendColumn(slice *[]catalog.Column, col catalog.Column, cap int) { if *slice == nil { *slice = make([]catalog.Column, 0, cap) diff --git a/pkg/sql/catalog/tabledesc/index.go b/pkg/sql/catalog/tabledesc/index.go index a39173c87ce1..b8bc6cd0920a 100644 --- a/pkg/sql/catalog/tabledesc/index.go +++ b/pkg/sql/catalog/tabledesc/index.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -202,7 +203,7 @@ func (w index) GetGeoConfig() geoindex.Config { } // GetSharded returns the ShardedDescriptor in the index descriptor -func (w index) GetSharded() descpb.ShardedDescriptor { +func (w index) GetSharded() catpb.ShardedDescriptor { return w.desc.Sharded } diff --git a/pkg/sql/catalog/tabledesc/index_test.go b/pkg/sql/catalog/tabledesc/index_test.go index fa326b973fc5..733891f5f176 100644 --- a/pkg/sql/catalog/tabledesc/index_test.go +++ b/pkg/sql/catalog/tabledesc/index_test.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -276,7 +277,7 @@ func TestIndexInterface(t *testing.T) { errMsgFmt, "GetGeoConfig", idx.GetName()) require.Equal(t, idx == s4, idx.GetShardColumnName() != "", errMsgFmt, "GetShardColumnName", idx.GetName()) - require.Equal(t, idx == s4, !(&descpb.ShardedDescriptor{}).Equal(idx.GetSharded()), + require.Equal(t, idx == s4, !(&catpb.ShardedDescriptor{}).Equal(idx.GetSharded()), errMsgFmt, "GetSharded", idx.GetName()) require.Equalf(t, idx != s3, idx.NumSecondaryStoredColumns() == 0, errMsgFmt, "NumSecondaryStoredColumns", idx.GetName()) diff --git a/pkg/sql/catalog/tabledesc/safe_format_test.go b/pkg/sql/catalog/tabledesc/safe_format_test.go index 1436c85e3ef9..dbca705f4e3c 100644 --- a/pkg/sql/catalog/tabledesc/safe_format_test.go +++ b/pkg/sql/catalog/tabledesc/safe_format_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -124,7 +125,7 @@ func TestSafeMessage(t *testing.T) { ReferencedColumnIDs: []descpb.ColumnID{2}, ReferencedTableID: 112, Validity: descpb.ConstraintValidity_Validated, - OnDelete: descpb.ForeignKeyReference_CASCADE, + OnDelete: catpb.ForeignKeyAction_CASCADE, Match: descpb.ForeignKeyReference_PARTIAL, }) mutable.OutboundFKs = append(mutable.OutboundFKs, descpb.ForeignKeyConstraint{ @@ -134,7 +135,7 @@ func TestSafeMessage(t *testing.T) { ReferencedColumnIDs: []descpb.ColumnID{1}, ReferencedTableID: 3, Validity: descpb.ConstraintValidity_Validated, - OnDelete: descpb.ForeignKeyReference_SET_DEFAULT, + OnDelete: catpb.ForeignKeyAction_SET_DEFAULT, Match: descpb.ForeignKeyReference_SIMPLE, }) @@ -150,9 +151,8 @@ func TestSafeMessage(t *testing.T) { OriginColumnIDs: []descpb.ColumnID{2}, ReferencedTableID: 2, ReferencedColumnIDs: []descpb.ColumnID{3}, - Validity: descpb.ConstraintValidity_Unvalidated, - OnDelete: descpb.ForeignKeyReference_SET_NULL, - Match: descpb.ForeignKeyReference_FULL, + Validity: descpb.ConstraintValidity_Unvalidated, OnDelete: catpb.ForeignKeyAction_SET_NULL, + Match: descpb.ForeignKeyReference_FULL, }, }, }, diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index dee7e4afb880..9fa4ff77453d 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -36,7 +37,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" - "google.golang.org/protobuf/proto" ) // Mutable is a custom type for TableDescriptors @@ -2177,7 +2177,7 @@ func (desc *wrapper) PrimaryIndexSpan(codec keys.SQLCodec) roachpb.Span { // IndexSpan implements the TableDescriptor interface. func (desc *wrapper) IndexSpan(codec keys.SQLCodec, indexID descpb.IndexID) roachpb.Span { - prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(codec, desc, indexID)) + prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(codec, desc.GetID(), indexID)) return roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()} } @@ -2391,13 +2391,13 @@ func (desc *wrapper) IsLocalityGlobal() bool { } // GetRegionalByTableRegion implements the TableDescriptor interface. 
-func (desc *wrapper) GetRegionalByTableRegion() (descpb.RegionName, error) { +func (desc *wrapper) GetRegionalByTableRegion() (catpb.RegionName, error) { if !desc.IsLocalityRegionalByTable() { return "", errors.AssertionFailedf("%s is not REGIONAL BY TABLE", desc.Name) } region := desc.LocalityConfig.GetRegionalByTable().Region if region == nil { - return descpb.RegionName(tree.PrimaryRegionNotSpecifiedName), nil + return catpb.RegionName(tree.PrimaryRegionNotSpecifiedName), nil } return *region, nil } @@ -2424,7 +2424,7 @@ func (desc *wrapper) GetRegionalByRowTableRegionColumnName() (tree.Name, error) func (desc *wrapper) GetMultiRegionEnumDependencyIfExists() bool { if desc.IsLocalityRegionalByTable() { regionName, _ := desc.GetRegionalByTableRegion() - return regionName != descpb.RegionName(tree.PrimaryRegionNotSpecifiedName) + return regionName != catpb.RegionName(tree.PrimaryRegionNotSpecifiedName) } return false } @@ -2442,15 +2442,15 @@ func (desc *Mutable) SetTableLocalityRegionalByTable(region tree.Name) { } // LocalityConfigRegionalByTable returns a config for a REGIONAL BY TABLE table. -func LocalityConfigRegionalByTable(region tree.Name) descpb.TableDescriptor_LocalityConfig { - l := &descpb.TableDescriptor_LocalityConfig_RegionalByTable_{ - RegionalByTable: &descpb.TableDescriptor_LocalityConfig_RegionalByTable{}, +func LocalityConfigRegionalByTable(region tree.Name) catpb.LocalityConfig { + l := &catpb.LocalityConfig_RegionalByTable_{ + RegionalByTable: &catpb.LocalityConfig_RegionalByTable{}, } if region != tree.PrimaryRegionNotSpecifiedName { - regionName := descpb.RegionName(region) + regionName := catpb.RegionName(region) l.RegionalByTable.Region = ®ionName } - return descpb.TableDescriptor_LocalityConfig{Locality: l} + return catpb.LocalityConfig{Locality: l} } // SetTableLocalityRegionalByRow sets the descriptor's locality config to @@ -2465,13 +2465,13 @@ func (desc *Mutable) SetTableLocalityRegionalByRow(regionColName tree.Name) { } // LocalityConfigRegionalByRow returns a config for a REGIONAL BY ROW table. -func LocalityConfigRegionalByRow(regionColName tree.Name) descpb.TableDescriptor_LocalityConfig { - rbr := &descpb.TableDescriptor_LocalityConfig_RegionalByRow{} +func LocalityConfigRegionalByRow(regionColName tree.Name) catpb.LocalityConfig { + rbr := &catpb.LocalityConfig_RegionalByRow{} if regionColName != tree.RegionalByRowRegionNotSpecifiedName { - rbr.As = proto.String(string(regionColName)) + rbr.As = (*string)(®ionColName) } - return descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByRow_{ + return catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByRow_{ RegionalByRow: rbr, }, } @@ -2488,10 +2488,10 @@ func (desc *Mutable) SetTableLocalityGlobal() { } // LocalityConfigGlobal returns a config for a GLOBAL table. 
-func LocalityConfigGlobal() descpb.TableDescriptor_LocalityConfig { - return descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_Global_{ - Global: &descpb.TableDescriptor_LocalityConfig_Global{}, +func LocalityConfigGlobal() catpb.LocalityConfig { + return catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_Global_{ + Global: &catpb.LocalityConfig_Global{}, }, } } diff --git a/pkg/sql/catalog/tabledesc/table.go b/pkg/sql/catalog/tabledesc/table.go index e537f34fd28e..47dae6190c8a 100644 --- a/pkg/sql/catalog/tabledesc/table.go +++ b/pkg/sql/catalog/tabledesc/table.go @@ -18,6 +18,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -114,9 +115,9 @@ func MakeColumnDefDescs( if d.GeneratedIdentity.IsGeneratedAsIdentity { switch d.GeneratedIdentity.GeneratedAsIdentityType { case tree.GeneratedAlways: - col.GeneratedAsIdentityType = descpb.GeneratedAsIdentityType_GENERATED_ALWAYS + col.GeneratedAsIdentityType = catpb.GeneratedAsIdentityType_GENERATED_ALWAYS case tree.GeneratedByDefault: - col.GeneratedAsIdentityType = descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT + col.GeneratedAsIdentityType = catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT default: return nil, errors.AssertionFailedf( "column %s is of invalid generated as identity type (neither ALWAYS nor BY DEFAULT)", string(d.Name)) @@ -199,7 +200,7 @@ func MakeColumnDefDescs( Unique: true, KeyColumnNames: []string{shardColName, string(d.Name)}, KeyColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, - Sharded: descpb.ShardedDescriptor{ + Sharded: catpb.ShardedDescriptor{ IsSharded: true, Name: shardColName, ShardBuckets: buckets, @@ -651,7 +652,7 @@ func RenameColumnInTable( // Rename the column in hash-sharded idx descriptors. Potentially rename the // shard column too if we haven't already done it. shardColumnsToRename := make(map[tree.Name]tree.Name) // map[oldShardColName]newShardColName - maybeUpdateShardedDesc := func(shardedDesc *descpb.ShardedDescriptor) { + maybeUpdateShardedDesc := func(shardedDesc *catpb.ShardedDescriptor) { if !shardedDesc.IsSharded { return } diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go index 72dc0e5365af..f3b4d254ccbc 100644 --- a/pkg/sql/catalog/tabledesc/table_desc.go +++ b/pkg/sql/catalog/tabledesc/table_desc.go @@ -418,6 +418,79 @@ func (desc *wrapper) SystemColumns() []catalog.Column { return desc.getExistingOrNewColumnCache().system } +// IndexColumns implements the TableDescriptor interface. +func (desc *wrapper) IndexColumns(idx catalog.Index) []catalog.Column { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.all + } + return nil +} + +// IndexKeyColumns implements the TableDescriptor interface. +func (desc *wrapper) IndexKeyColumns(idx catalog.Index) []catalog.Column { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.key + } + return nil +} + +// IndexKeyColumnDirections implements the TableDescriptor interface. 
+func (desc *wrapper) IndexKeyColumnDirections( + idx catalog.Index, +) []descpb.IndexDescriptor_Direction { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.keyDirs + } + return nil +} + +// IndexKeySuffixColumns implements the TableDescriptor interface. +func (desc *wrapper) IndexKeySuffixColumns(idx catalog.Index) []catalog.Column { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.keySuffix + } + return nil +} + +// IndexFullColumns implements the TableDescriptor interface. +func (desc *wrapper) IndexFullColumns(idx catalog.Index) []catalog.Column { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.full + } + return nil +} + +// IndexFullColumnDirections implements the TableDescriptor interface. +func (desc *wrapper) IndexFullColumnDirections( + idx catalog.Index, +) []descpb.IndexDescriptor_Direction { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.fullDirs + } + return nil +} + +// IndexStoredColumns implements the TableDescriptor interface. +func (desc *wrapper) IndexStoredColumns(idx catalog.Index) []catalog.Column { + if ic := desc.getExistingOrNewIndexColumnCache(idx); ic != nil { + return ic.stored + } + return nil +} + +// getExistingOrNewIndexColumnCache is a convenience method for Index*Columns +// methods. +func (desc *wrapper) getExistingOrNewIndexColumnCache(idx catalog.Index) *indexColumnCache { + if idx == nil { + return nil + } + c := desc.getExistingOrNewColumnCache() + if idx.Ordinal() >= len(c.index) { + return nil + } + return &c.index[idx.Ordinal()] +} + // FindColumnWithID implements the TableDescriptor interface. func (desc *wrapper) FindColumnWithID(id descpb.ColumnID) (catalog.Column, error) { for _, col := range desc.AllColumns() { diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index 0bae1def4021..db9d76bb7a6a 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -14,6 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -473,8 +474,8 @@ func ValidateOnUpdate(desc catalog.TableDescriptor, errReportFn func(err error)) } _ = desc.ForeachOutboundFK(func(fk *descpb.ForeignKeyConstraint) error { - if fk.OnUpdate == descpb.ForeignKeyReference_NO_ACTION || - fk.OnUpdate == descpb.ForeignKeyReference_RESTRICT { + if fk.OnUpdate == catpb.ForeignKeyAction_NO_ACTION || + fk.OnUpdate == catpb.ForeignKeyAction_RESTRICT { return nil } for _, fkCol := range fk.OriginColumnIDs { @@ -1011,7 +1012,7 @@ func (desc *wrapper) ensureShardedIndexNotComputed(index *descpb.IndexDescriptor // stored sorted by upper bound. colOffset is non-zero for subpartitions and // indicates how many index columns to skip over. 
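To show how the removed `catalog.FullIndexColumnIDs` maps onto the new cached per-index accessors, here is a small sketch; the helper name is hypothetical and it assumes every column ID referenced by the index resolves to a column:

```
package example

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
)

// fullColumnIDs reproduces the shape of the old FullIndexColumnIDs result on
// top of the new cached accessors. The descriptor returns pre-built slices,
// so nothing beyond the ID slice is allocated per call.
func fullColumnIDs(
	tab catalog.TableDescriptor, idx catalog.Index,
) ([]descpb.ColumnID, []descpb.IndexDescriptor_Direction) {
	cols := tab.IndexFullColumns(idx)
	ids := make([]descpb.ColumnID, len(cols))
	for i, col := range cols {
		// Assumes a well-formed descriptor: a nil entry would indicate a
		// dangling column ID in the index.
		ids[i] = col.GetID()
	}
	return ids, tab.IndexFullColumnDirections(idx)
}
```
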
func (desc *wrapper) validatePartitioningDescriptor( - a *rowenc.DatumAlloc, + a *tree.DatumAlloc, idx catalog.Index, part catalog.Partitioning, colOffset int, @@ -1182,7 +1183,7 @@ func (ps partitionInterval) Range() interval.Range { func (desc *wrapper) validatePartitioning() error { partitionNames := make(map[string]string) - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} return catalog.ForEachNonDropIndex(desc, func(idx catalog.Index) error { return desc.validatePartitioningDescriptor( a, idx, idx.GetPartitioning(), 0 /* colOffset */, partitionNames, diff --git a/pkg/sql/catalog/tabledesc/validate_test.go b/pkg/sql/catalog/tabledesc/validate_test.go index fe609a8c7873..cd8776c00de6 100644 --- a/pkg/sql/catalog/tabledesc/validate_test.go +++ b/pkg/sql/catalog/tabledesc/validate_test.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -935,7 +936,7 @@ func TestValidateTableDesc(t *testing.T) { KeyColumnIDs: []descpb.ColumnID{2, 1}, KeyColumnNames: []string{"crdb_internal_bar_shard_5", "bar"}, KeyColumnDirections: []descpb.IndexDescriptor_Direction{descpb.IndexDescriptor_ASC, descpb.IndexDescriptor_ASC}, - Sharded: descpb.ShardedDescriptor{ + Sharded: catpb.ShardedDescriptor{ IsSharded: true, Name: "does not exist", ShardBuckets: 5, @@ -1422,7 +1423,7 @@ func TestValidateTableDesc(t *testing.T) { { ID: 1, Name: "bar", - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_ALWAYS, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_ALWAYS, OnUpdateExpr: proto.String("'blah'"), }, }, @@ -1442,7 +1443,7 @@ func TestValidateTableDesc(t *testing.T) { { ID: 1, Name: "bar", - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, OnUpdateExpr: proto.String("'blah'"), }, }, @@ -1460,7 +1461,7 @@ func TestValidateTableDesc(t *testing.T) { FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ {ID: 1, Name: "bar", Nullable: true, - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_ALWAYS, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_ALWAYS, }, }, NextColumnID: 3, @@ -1473,7 +1474,7 @@ func TestValidateTableDesc(t *testing.T) { FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ {ID: 1, Name: "bar", Nullable: true, - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, }, }, NextColumnID: 3, @@ -1485,7 +1486,7 @@ func TestValidateTableDesc(t *testing.T) { Name: "foo", FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar", GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_ALWAYS, + {ID: 1, Name: "bar", GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_ALWAYS, ComputeExpr: &computedExpr}, }, NextColumnID: 3, @@ -1497,7 +1498,7 @@ func TestValidateTableDesc(t *testing.T) { Name: "foo", FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar", GeneratedAsIdentityType: 
descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, + {ID: 1, Name: "bar", GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, ComputeExpr: &computedExpr}, }, NextColumnID: 3, @@ -1510,7 +1511,7 @@ func TestValidateTableDesc(t *testing.T) { FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ {ID: 1, Name: "bar", Nullable: true, - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_ALWAYS, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_ALWAYS, GeneratedAsIdentitySequenceOption: &generatedAsIdentitySequenceOptionExpr, }, }, @@ -1524,7 +1525,7 @@ func TestValidateTableDesc(t *testing.T) { FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ {ID: 1, Name: "bar", Nullable: true, - GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, + GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, GeneratedAsIdentitySequenceOption: &generatedAsIdentitySequenceOptionExpr, }, }, @@ -1537,7 +1538,7 @@ func TestValidateTableDesc(t *testing.T) { Name: "foo", FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar", GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_ALWAYS, + {ID: 1, Name: "bar", GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_ALWAYS, GeneratedAsIdentitySequenceOption: &generatedAsIdentitySequenceOptionExpr, ComputeExpr: &computedExpr}, }, @@ -1550,7 +1551,7 @@ func TestValidateTableDesc(t *testing.T) { Name: "foo", FormatVersion: descpb.InterleavedFormatVersion, Columns: []descpb.ColumnDescriptor{ - {ID: 1, Name: "bar", GeneratedAsIdentityType: descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, + {ID: 1, Name: "bar", GeneratedAsIdentityType: catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT, GeneratedAsIdentitySequenceOption: &generatedAsIdentitySequenceOptionExpr, ComputeExpr: &computedExpr}, }, diff --git a/pkg/sql/catalog/typedesc/BUILD.bazel b/pkg/sql/catalog/typedesc/BUILD.bazel index 4b58c76d5928..3a2a634f6d7c 100644 --- a/pkg/sql/catalog/typedesc/BUILD.bazel +++ b/pkg/sql/catalog/typedesc/BUILD.bazel @@ -13,6 +13,7 @@ go_library( deps = [ "//pkg/keys", "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/catprivilege", "//pkg/sql/catalog/descpb", "//pkg/sql/enum", diff --git a/pkg/sql/catalog/typedesc/table_implicit_record_type.go b/pkg/sql/catalog/typedesc/table_implicit_record_type.go index a4e7527a35f5..750b31d5cf0f 100644 --- a/pkg/sql/catalog/typedesc/table_implicit_record_type.go +++ b/pkg/sql/catalog/typedesc/table_implicit_record_type.go @@ -14,6 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -269,31 +270,31 @@ func (v TableImplicitRecordType) IsCompatibleWith(_ catalog.TypeDescriptor) erro } // PrimaryRegionName implements the TypeDescriptorInterface. -func (v TableImplicitRecordType) PrimaryRegionName() (descpb.RegionName, error) { +func (v TableImplicitRecordType) PrimaryRegionName() (catpb.RegionName, error) { return "", errors.AssertionFailedf( "can not get primary region of a implicit table record type") } // RegionNames implements the TypeDescriptorInterface. 
-func (v TableImplicitRecordType) RegionNames() (descpb.RegionNames, error) { +func (v TableImplicitRecordType) RegionNames() (catpb.RegionNames, error) { return nil, errors.AssertionFailedf( "can not get region names of a implicit table record type") } // RegionNamesIncludingTransitioning implements the TypeDescriptorInterface. -func (v TableImplicitRecordType) RegionNamesIncludingTransitioning() (descpb.RegionNames, error) { +func (v TableImplicitRecordType) RegionNamesIncludingTransitioning() (catpb.RegionNames, error) { return nil, errors.AssertionFailedf( "can not get region names of a implicit table record type") } // RegionNamesForValidation implements the TypeDescriptorInterface. -func (v TableImplicitRecordType) RegionNamesForValidation() (descpb.RegionNames, error) { +func (v TableImplicitRecordType) RegionNamesForValidation() (catpb.RegionNames, error) { return nil, errors.AssertionFailedf( "can not get region names of a implicit table record type") } // TransitioningRegionNames implements the TypeDescriptorInterface. -func (v TableImplicitRecordType) TransitioningRegionNames() (descpb.RegionNames, error) { +func (v TableImplicitRecordType) TransitioningRegionNames() (catpb.RegionNames, error) { return nil, errors.AssertionFailedf( "can not get region names of a implicit table record type") } diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go index e41a85c46fdc..a4822ac563ab 100644 --- a/pkg/sql/catalog/typedesc/type_desc.go +++ b/pkg/sql/catalog/typedesc/type_desc.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/enum" @@ -188,7 +189,7 @@ func (desc *immutable) NewBuilder() catalog.DescriptorBuilder { } // PrimaryRegionName implements the TypeDescriptor interface. -func (desc *immutable) PrimaryRegionName() (descpb.RegionName, error) { +func (desc *immutable) PrimaryRegionName() (catpb.RegionName, error) { if desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { return "", errors.AssertionFailedf( "can not get primary region of a non multi-region enum") @@ -197,66 +198,66 @@ func (desc *immutable) PrimaryRegionName() (descpb.RegionName, error) { } // RegionNames implements the TypeDescriptor interface. -func (desc *immutable) RegionNames() (descpb.RegionNames, error) { +func (desc *immutable) RegionNames() (catpb.RegionNames, error) { if desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { return nil, errors.AssertionFailedf( "can not get regions of a non multi-region enum %d", desc.ID, ) } - var regions descpb.RegionNames + var regions catpb.RegionNames for _, member := range desc.EnumMembers { if member.Capability == descpb.TypeDescriptor_EnumMember_READ_ONLY { continue } - regions = append(regions, descpb.RegionName(member.LogicalRepresentation)) + regions = append(regions, catpb.RegionName(member.LogicalRepresentation)) } return regions, nil } // TransitioningRegionNames implements the TypeDescriptor interface. 
-func (desc *immutable) TransitioningRegionNames() (descpb.RegionNames, error) { +func (desc *immutable) TransitioningRegionNames() (catpb.RegionNames, error) { if desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { return nil, errors.AssertionFailedf( "can not get regions of a non multi-region enum %d", desc.ID, ) } - var regions descpb.RegionNames + var regions catpb.RegionNames for _, member := range desc.EnumMembers { if member.Direction != descpb.TypeDescriptor_EnumMember_NONE { - regions = append(regions, descpb.RegionName(member.LogicalRepresentation)) + regions = append(regions, catpb.RegionName(member.LogicalRepresentation)) } } return regions, nil } // RegionNamesForValidation implements the TypeDescriptor interface. -func (desc *immutable) RegionNamesForValidation() (descpb.RegionNames, error) { +func (desc *immutable) RegionNamesForValidation() (catpb.RegionNames, error) { if desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { return nil, errors.AssertionFailedf( "can not get regions of a non multi-region enum %d", desc.ID, ) } - var regions descpb.RegionNames + var regions catpb.RegionNames for _, member := range desc.EnumMembers { if member.Capability == descpb.TypeDescriptor_EnumMember_READ_ONLY && member.Direction == descpb.TypeDescriptor_EnumMember_ADD { continue } - regions = append(regions, descpb.RegionName(member.LogicalRepresentation)) + regions = append(regions, catpb.RegionName(member.LogicalRepresentation)) } return regions, nil } // RegionNamesIncludingTransitioning implements the TypeDescriptor interface. -func (desc *immutable) RegionNamesIncludingTransitioning() (descpb.RegionNames, error) { +func (desc *immutable) RegionNamesIncludingTransitioning() (catpb.RegionNames, error) { if desc.Kind != descpb.TypeDescriptor_MULTIREGION_ENUM { return nil, errors.AssertionFailedf( "can not get regions of a non multi-region enum %d", desc.ID, ) } - var regions descpb.RegionNames + var regions catpb.RegionNames for _, member := range desc.EnumMembers { - regions = append(regions, descpb.RegionName(member.LogicalRepresentation)) + regions = append(regions, catpb.RegionName(member.LogicalRepresentation)) } return regions, nil } @@ -665,7 +666,7 @@ func (desc *immutable) validateMultiRegion( { found := false for _, member := range desc.EnumMembers { - if descpb.RegionName(member.LogicalRepresentation) == primaryRegion { + if catpb.RegionName(member.LogicalRepresentation) == primaryRegion { found = true } } diff --git a/pkg/sql/colconv/vec_to_datum.eg.go b/pkg/sql/colconv/vec_to_datum.eg.go index c791b31702ee..a8e914c3b855 100644 --- a/pkg/sql/colconv/vec_to_datum.eg.go +++ b/pkg/sql/colconv/vec_to_datum.eg.go @@ -10,14 +10,12 @@ package colconv import ( - "math/big" "sync" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/json" @@ -55,7 +53,7 @@ var ( type VecToDatumConverter struct { convertedVecs []tree.Datums vecIdxsToConvert []int - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ execinfra.Releasable = &VecToDatumConverter{} @@ -249,7 +247,7 @@ func (c *VecToDatumConverter) GetDatumColumn(colIdx int) tree.Datums { // selection vector. 
It doesn't account for the memory used by the newly // created tree.Datums, so it is up to the caller to do the memory accounting. func ColVecToDatumAndDeselect( - converted []tree.Datum, col coldata.Vec, length int, sel []int, da *rowenc.DatumAlloc, + converted []tree.Datum, col coldata.Vec, length int, sel []int, da *tree.DatumAlloc, ) { if length == 0 { return @@ -450,10 +448,6 @@ func ColVecToDatumAndDeselect( _ = true v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) //gcassert:bce converted[destIdx] = _converted } @@ -834,10 +828,6 @@ func ColVecToDatumAndDeselect( _ = true v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) //gcassert:bce converted[destIdx] = _converted } @@ -1035,7 +1025,7 @@ func ColVecToDatumAndDeselect( // doesn't account for the memory used by the newly created tree.Datums, so it // is up to the caller to do the memory accounting. func ColVecToDatum( - converted []tree.Datum, col coldata.Vec, length int, sel []int, da *rowenc.DatumAlloc, + converted []tree.Datum, col coldata.Vec, length int, sel []int, da *tree.DatumAlloc, ) { if length == 0 { return @@ -1225,10 +1215,6 @@ func ColVecToDatum( _ = true v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) converted[destIdx] = _converted } } @@ -1643,10 +1629,6 @@ func ColVecToDatum( //gcassert:bce v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) //gcassert:bce converted[destIdx] = _converted } @@ -2029,10 +2011,6 @@ func ColVecToDatum( _ = true v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) converted[destIdx] = _converted } } @@ -2375,10 +2353,6 @@ func ColVecToDatum( //gcassert:bce v := typedCol.Get(srcIdx) _converted := da.NewDDecimal(tree.DDecimal{Decimal: v}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. 
- _converted.Coeff = big.Int{} - _converted.Coeff.Set(&v.Coeff) //gcassert:bce converted[destIdx] = _converted } diff --git a/pkg/sql/colconv/vec_to_datum_tmpl.go b/pkg/sql/colconv/vec_to_datum_tmpl.go index 039e8c1f0d9c..04a05170aeca 100644 --- a/pkg/sql/colconv/vec_to_datum_tmpl.go +++ b/pkg/sql/colconv/vec_to_datum_tmpl.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/json" @@ -66,7 +65,7 @@ var ( type VecToDatumConverter struct { convertedVecs []tree.Datums vecIdxsToConvert []int - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ execinfra.Releasable = &VecToDatumConverter{} @@ -260,7 +259,7 @@ func (c *VecToDatumConverter) GetDatumColumn(colIdx int) tree.Datums { // selection vector. It doesn't account for the memory used by the newly // created tree.Datums, so it is up to the caller to do the memory accounting. func ColVecToDatumAndDeselect( - converted []tree.Datum, col coldata.Vec, length int, sel []int, da *rowenc.DatumAlloc, + converted []tree.Datum, col coldata.Vec, length int, sel []int, da *tree.DatumAlloc, ) { if length == 0 { return @@ -282,7 +281,7 @@ func ColVecToDatumAndDeselect( // doesn't account for the memory used by the newly created tree.Datums, so it // is up to the caller to do the memory accounting. func ColVecToDatum( - converted []tree.Datum, col coldata.Vec, length int, sel []int, da *rowenc.DatumAlloc, + converted []tree.Datum, col coldata.Vec, length int, sel []int, da *tree.DatumAlloc, ) { if length == 0 { return @@ -342,7 +341,7 @@ func vecToDatum( col coldata.Vec, length int, sel []int, - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, hasNulls bool, hasSel bool, deselect bool, diff --git a/pkg/sql/colencoding/BUILD.bazel b/pkg/sql/colencoding/BUILD.bazel index f5f0f4903499..22ba7b207b33 100644 --- a/pkg/sql/colencoding/BUILD.bazel +++ b/pkg/sql/colencoding/BUILD.bazel @@ -12,7 +12,8 @@ go_library( "//pkg/col/coldata", "//pkg/roachpb:with-mocks", "//pkg/sql/catalog/descpb", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util", @@ -20,7 +21,7 @@ go_library( "//pkg/util/encoding", "//pkg/util/log", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) @@ -33,9 +34,8 @@ go_test( deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", - "//pkg/sql/catalog/descpb", "//pkg/sql/randgen", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util/encoding", diff --git a/pkg/sql/colencoding/key_encoding.go b/pkg/sql/colencoding/key_encoding.go index 85b8131861c3..c1cffb496fee 100644 --- a/pkg/sql/colencoding/key_encoding.go +++ b/pkg/sql/colencoding/key_encoding.go @@ -13,11 +13,12 @@ package colencoding import ( "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" 
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" @@ -43,7 +44,7 @@ import ( // NULL, regardless of whether or not indexColIdx indicates that the column // should be decoded. func DecodeKeyValsToCols( - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, vecs *coldata.TypedVecs, rowIdx int, indexColIdx []int, @@ -64,7 +65,7 @@ func DecodeKeyValsToCols( foundNull = foundNull || isNull } // Don't need the coldata - skip it. - key, err = rowenc.SkipTableKey(key) + key, err = keyside.Skip(key) } else { if unseen != nil { unseen.Remove(vecIdx) @@ -81,12 +82,12 @@ func DecodeKeyValsToCols( return key, foundNull, scratch, nil } -// decodeTableKeyToCol decodes a value encoded by EncodeTableKey, writing the +// decodeTableKeyToCol decodes a value encoded by keyside.Encode, writing the // result to the rowIdx'th slot of the vecIdx'th vector in coldata.TypedVecs. -// See the analog, rowenc.DecodeTableKey, in rowenc/column_type_encoding.go. +// See the analog, keyside.Decode, in rowenc/column_type_encoding.go. // decodeTableKeyToCol also returns whether or not the decoded value was NULL. func decodeTableKeyToCol( - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, vecs *coldata.TypedVecs, vecIdx int, rowIdx int, @@ -206,7 +207,7 @@ func decodeTableKeyToCol( if dir == descpb.IndexDescriptor_DESC { encDir = encoding.Descending } - d, rkey, err = rowenc.DecodeTableKey(da, valType, key, encDir) + d, rkey, err = keyside.Decode(da, valType, key, encDir) vecs.DatumCols[colIdx].Set(rowIdx, d) } return rkey, false, scratch, err @@ -219,7 +220,7 @@ func decodeTableKeyToCol( // See the analog, rowenc.UnmarshalColumnValue, in // rowenc/column_type_encoding.go. func UnmarshalColumnValueToCol( - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, vecs *coldata.TypedVecs, vecIdx, rowIdx int, typ *types.T, @@ -281,7 +282,7 @@ func UnmarshalColumnValueToCol( // Types backed by tree.Datums. default: var d tree.Datum - d, err = rowenc.UnmarshalColumnValue(da, typ, value) + d, err = valueside.UnmarshalLegacy(da, typ, value) if err != nil { return err } diff --git a/pkg/sql/colencoding/value_encoding.go b/pkg/sql/colencoding/value_encoding.go index 70e8179ebe88..501c747f624c 100644 --- a/pkg/sql/colencoding/value_encoding.go +++ b/pkg/sql/colencoding/value_encoding.go @@ -14,7 +14,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/duration" @@ -27,7 +27,7 @@ import ( // coldata.TypedVecs. // See the analog in rowenc/column_type_encoding.go. func DecodeTableValueToCol( - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, vecs *coldata.TypedVecs, vecIdx int, rowIdx int, @@ -111,7 +111,7 @@ func DecodeTableValueToCol( // Types backed by tree.Datums. 
default: var d tree.Datum - d, buf, err = rowenc.DecodeUntaggedDatum(da, valTyp, buf) + d, buf, err = valueside.DecodeUntaggedDatum(da, valTyp, buf) if err != nil { return buf, err } diff --git a/pkg/sql/colencoding/value_encoding_test.go b/pkg/sql/colencoding/value_encoding_test.go index 32bab922463d..36fcf1202bfa 100644 --- a/pkg/sql/colencoding/value_encoding_test.go +++ b/pkg/sql/colencoding/value_encoding_test.go @@ -15,9 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -27,7 +26,7 @@ import ( func TestDecodeTableValueToCol(t *testing.T) { rng, _ := randutil.NewTestRand() var ( - da rowenc.DatumAlloc + da tree.DatumAlloc buf, scratch []byte ) nCols := 1000 @@ -39,7 +38,7 @@ func TestDecodeTableValueToCol(t *testing.T) { typs[i] = ct datums[i] = datum var err error - buf, err = rowenc.EncodeTableValue(buf, descpb.ColumnID(encoding.NoColumnID), datum, scratch) + buf, err = valueside.Encode(buf, valueside.NoColumnID, datum, scratch) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/colexec/BUILD.bazel b/pkg/sql/colexec/BUILD.bazel index 8586b09d9e3d..9c2d0f7f9e19 100644 --- a/pkg/sql/colexec/BUILD.bazel +++ b/pkg/sql/colexec/BUILD.bazel @@ -22,6 +22,7 @@ go_library( "invariants_checker.go", "limit.go", "materializer.go", + "not_expr_ops.go", "offset.go", "ordered_aggregator.go", "parallel_unordered_synchronizer.go", @@ -80,7 +81,7 @@ go_library( "//pkg/util/mon", "//pkg/util/stringarena", "//pkg/util/tracing", - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_marusama_semaphore//:semaphore", @@ -115,6 +116,7 @@ go_test( "main_test.go", "materializer_test.go", "mergejoiner_test.go", + "not_expr_ops_test.go", "offset_test.go", "ordered_synchronizer_test.go", "parallel_unordered_synchronizer_test.go", @@ -177,7 +179,7 @@ go_test( "//pkg/util/timeofday", "//pkg/util/uuid", "@com_github_apache_arrow_go_arrow//array", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_marusama_semaphore//:semaphore", "@com_github_stretchr_testify//require", diff --git a/pkg/sql/colexec/aggregators_test.go b/pkg/sql/colexec/aggregators_test.go index 59e0f1a11ac1..3f95ca0112f2 100644 --- a/pkg/sql/colexec/aggregators_test.go +++ b/pkg/sql/colexec/aggregators_test.go @@ -16,7 +16,7 @@ import ( "math" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldatatestutils" "github.com/cockroachdb/cockroach/pkg/settings/cluster" diff --git a/pkg/sql/colexec/aggregators_util.go b/pkg/sql/colexec/aggregators_util.go index 7b2d13232d07..f1eed1a6bb5a 100644 --- a/pkg/sql/colexec/aggregators_util.go +++ b/pkg/sql/colexec/aggregators_util.go @@ -52,10 +52,7 @@ type aggregatorHelper interface { // DISTINCT or FILTER aggregation, then the defaultAggregatorHelper // is returned which has negligible performance overhead. 
func newAggregatorHelper( - args *colexecagg.NewAggregatorArgs, - datumAlloc *rowenc.DatumAlloc, - isHashAgg bool, - maxBatchSize int, + args *colexecagg.NewAggregatorArgs, datumAlloc *tree.DatumAlloc, isHashAgg bool, maxBatchSize int, ) aggregatorHelper { hasDistinct, hasFilterAgg := false, false aggFilter := make([]int, len(args.Spec.Aggregations)) @@ -261,7 +258,7 @@ type distinctAggregatorHelperBase struct { inputTypes []*types.T aggColsConverter *colconv.VecToDatumConverter arena stringarena.Arena - datumAlloc *rowenc.DatumAlloc + datumAlloc *tree.DatumAlloc scratch struct { ed rowenc.EncDatum encoded []byte @@ -272,7 +269,7 @@ type distinctAggregatorHelperBase struct { } func newDistinctAggregatorHelperBase( - args *colexecagg.NewAggregatorArgs, datumAlloc *rowenc.DatumAlloc, maxBatchSize int, + args *colexecagg.NewAggregatorArgs, datumAlloc *tree.DatumAlloc, maxBatchSize int, ) *distinctAggregatorHelperBase { b := &distinctAggregatorHelperBase{ aggregatorHelperBase: newAggregatorHelperBase(args.Spec, maxBatchSize), @@ -396,7 +393,7 @@ var _ aggregatorHelper = &filteringDistinctHashAggregatorHelper{} func newFilteringDistinctHashAggregatorHelper( args *colexecagg.NewAggregatorArgs, filters []*filteringSingleFunctionHashHelper, - datumAlloc *rowenc.DatumAlloc, + datumAlloc *tree.DatumAlloc, maxBatchSize int, ) aggregatorHelper { return &filteringDistinctHashAggregatorHelper{ @@ -454,7 +451,7 @@ type distinctOrderedAggregatorHelper struct { var _ aggregatorHelper = &distinctOrderedAggregatorHelper{} func newDistinctOrderedAggregatorHelper( - args *colexecagg.NewAggregatorArgs, datumAlloc *rowenc.DatumAlloc, maxBatchSize int, + args *colexecagg.NewAggregatorArgs, datumAlloc *tree.DatumAlloc, maxBatchSize int, ) aggregatorHelper { return &distinctOrderedAggregatorHelper{ distinctAggregatorHelperBase: newDistinctAggregatorHelperBase( diff --git a/pkg/sql/colexec/case_test.go b/pkg/sql/colexec/case_test.go index de196815709c..b31241d4fda6 100644 --- a/pkg/sql/colexec/case_test.go +++ b/pkg/sql/colexec/case_test.go @@ -114,7 +114,7 @@ func TestCaseOpRandomized(t *testing.T) { }, } - var da rowenc.DatumAlloc + var da tree.DatumAlloc rng, _ := randutil.NewTestRand() numWhenArms := 1 + rng.Intn(5) diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go index a0ae01adafc5..8f4eade17073 100644 --- a/pkg/sql/colexec/colbuilder/execplan.go +++ b/pkg/sql/colexec/colbuilder/execplan.go @@ -170,13 +170,13 @@ func supportedNatively(spec *execinfrapb.ProcessorSpec) error { return nil case spec.Core.TableReader != nil: - if spec.Core.TableReader.IsCheck { + if spec.Core.TableReader.DeprecatedIsCheck { return errors.Newf("scrub table reader is unsupported in vectorized") } return nil case spec.Core.JoinReader != nil: - if spec.Core.JoinReader.LookupColumns != nil || !spec.Core.JoinReader.LookupExpr.Empty() { + if !spec.Core.JoinReader.IsIndexJoin() { return errLookupJoinUnsupported } return nil @@ -662,7 +662,7 @@ func NewColOperator( factory := args.Factory if args.ExprHelper == nil { args.ExprHelper = colexecargs.NewExprHelper() - args.ExprHelper.SemaCtx = flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.Txn) + args.ExprHelper.SemaCtx = flowCtx.NewSemaContext(flowCtx.Txn) } if args.MonitorRegistry == nil { args.MonitorRegistry = &colexecargs.MonitorRegistry{} @@ -740,7 +740,7 @@ func NewColOperator( if err := checkNumIn(inputs, 1); err != nil { return r, err } - if core.JoinReader.LookupColumns != nil || !core.JoinReader.LookupExpr.Empty() { + if 
!core.JoinReader.IsIndexJoin() { return r, errors.AssertionFailedf("lookup join reader is unsupported in vectorized") } // We have to create a separate account in order for the cFetcher to @@ -753,11 +753,23 @@ func NewColOperator( kvFetcherMemAcc := args.MonitorRegistry.CreateUnlimitedMemAccount( ctx, flowCtx, "kvfetcher" /* opName */, spec.ProcessorID, ) + var streamerBudgetAcc *mon.BoundAccount + // We have an index join, and when the ordering doesn't have to be + // maintained, we might use the Streamer API which requires a + // separate memory account that is bound to an unlimited memory + // monitor. + if !core.JoinReader.MaintainOrdering { + streamerBudgetAcc = args.MonitorRegistry.CreateUnlimitedMemAccount( + ctx, flowCtx, "streamer" /* opName */, spec.ProcessorID, + ) + } inputTypes := make([]*types.T, len(spec.Input[0].ColumnTypes)) copy(inputTypes, spec.Input[0].ColumnTypes) indexJoinOp, err := colfetcher.NewColIndexJoin( - ctx, getStreamingAllocator(ctx, args), colmem.NewAllocator(ctx, cFetcherMemAcc, factory), kvFetcherMemAcc, - flowCtx, args.ExprHelper, inputs[0].Root, core.JoinReader, post, inputTypes, + ctx, getStreamingAllocator(ctx, args), + colmem.NewAllocator(ctx, cFetcherMemAcc, factory), + kvFetcherMemAcc, streamerBudgetAcc, flowCtx, args.ExprHelper, + inputs[0].Root, core.JoinReader, post, inputTypes, ) if err != nil { return r, err @@ -1842,6 +1854,15 @@ func planSelectionOperators( op, resultIdx, true /* negate */, typs[resultIdx].Family() == types.TupleFamily, ) return op, resultIdx, typs, err + case *tree.NotExpr: + op, resultIdx, typs, err = planProjectionOperators( + ctx, evalCtx, t.TypedInnerExpr(), columnTypes, input, acc, factory, releasables, + ) + if err != nil { + return op, resultIdx, typs, err + } + op = colexec.NewNotExprSelOp(op, resultIdx) + return op, resultIdx, typs, err case *tree.ComparisonExpr: cmpOp := t.Operator leftOp, leftIdx, ct, err := planProjectionOperators( @@ -1978,6 +1999,17 @@ func planProjectionOperators( return planIsNullProjectionOp(ctx, evalCtx, t.ResolvedType(), t.TypedInnerExpr(), columnTypes, input, acc, false /* negate */, factory, releasables) case *tree.IsNotNullExpr: return planIsNullProjectionOp(ctx, evalCtx, t.ResolvedType(), t.TypedInnerExpr(), columnTypes, input, acc, true /* negate */, factory, releasables) + case *tree.NotExpr: + op, resultIdx, typs, err = planProjectionOperators( + ctx, evalCtx, t.TypedInnerExpr(), columnTypes, input, acc, factory, releasables, + ) + if err != nil { + return op, resultIdx, typs, err + } + outputIdx := len(typs) + op = colexec.NewNotExprProjOp(colmem.NewAllocator(ctx, acc, factory), op, resultIdx, outputIdx) + typs = appendOneType(typs, t.ResolvedType()) + return op, outputIdx, typs, nil case *tree.CastExpr: expr := t.Expr.(tree.TypedExpr) op, resultIdx, typs, err = planProjectionOperators( diff --git a/pkg/sql/colexec/colexecagg/BUILD.bazel b/pkg/sql/colexec/colexecagg/BUILD.bazel index 646c7c77c5b0..2e2ce136e97d 100644 --- a/pkg/sql/colexec/colexecagg/BUILD.bazel +++ b/pkg/sql/colexec/colexecagg/BUILD.bazel @@ -26,7 +26,7 @@ go_library( "//pkg/util/duration", "//pkg/util/json", # keep "//pkg/util/mon", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) @@ -35,6 +35,7 @@ go_test( name = "colexecagg_test", srcs = ["dep_test.go"], embed = [":colexecagg"], + tags = ["no-remote"], deps = ["//pkg/testutils/buildutil"], ) diff --git a/pkg/sql/colexec/colexecagg/aggregate_funcs.go
b/pkg/sql/colexec/colexecagg/aggregate_funcs.go index 0e0c394c0d48..7943eae67564 100644 --- a/pkg/sql/colexec/colexecagg/aggregate_funcs.go +++ b/pkg/sql/colexec/colexecagg/aggregate_funcs.go @@ -13,7 +13,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" diff --git a/pkg/sql/colexec/colexecagg/any_not_null_agg_tmpl.go b/pkg/sql/colexec/colexecagg/any_not_null_agg_tmpl.go index d25f393ef327..b5590c446d4a 100644 --- a/pkg/sql/colexec/colexecagg/any_not_null_agg_tmpl.go +++ b/pkg/sql/colexec/colexecagg/any_not_null_agg_tmpl.go @@ -24,7 +24,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" diff --git a/pkg/sql/colexec/colexecagg/avg_agg_tmpl.go b/pkg/sql/colexec/colexecagg/avg_agg_tmpl.go index 91c74ea9076f..501b08a9ec09 100644 --- a/pkg/sql/colexec/colexecagg/avg_agg_tmpl.go +++ b/pkg/sql/colexec/colexecagg/avg_agg_tmpl.go @@ -24,7 +24,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" @@ -106,14 +106,6 @@ type avg_TYPE_AGGKINDAgg struct { // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. curCount int64 - // {{if .NeedsHelper}} - // {{/* - // overloadHelper is used only when we perform the summation of integers - // and get a decimal result which is the case when NeedsHelper is true. In - // all other cases we don't want to wastefully allocate the helper. - // */}} - overloadHelper execgen.OverloadHelper - // {{end}} } var _ AggregateFunc = &avg_TYPE_AGGKINDAgg{} @@ -129,16 +121,6 @@ func (a *avg_TYPE_AGGKINDAgg) SetOutput(vec coldata.Vec) { func (a *avg_TYPE_AGGKINDAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // {{if .NeedsHelper}} - // {{/* - // overloadHelper is used only when we perform the summation of integers - // and get a decimal result which is the case when NeedsHelper is true. In - // all other cases we don't want to wastefully allocate the helper. - // */}} - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - // {{end}} execgen.SETVARIABLESIZE(oldCurSumSize, a.curSum) vec := vecs[inputIdxs[0]] col, nulls := vec.TemplateType(), vec.Nulls() @@ -257,11 +239,6 @@ func (a *avg_TYPE_AGGKINDAggAlloc) newAggFunc() AggregateFunc { // Remove implements the slidingWindowAggregateFunc interface (see // window_aggregator_tmpl.go). func (a *avg_TYPE_AGGKINDAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int) { - // {{if .NeedsHelper}} - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - // {{end}} execgen.SETVARIABLESIZE(oldCurSumSize, a.curSum) vec := vecs[inputIdxs[0]] col, nulls := vec.TemplateType(), vec.Nulls() diff --git a/pkg/sql/colexec/colexecagg/hash_any_not_null_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_any_not_null_agg.eg.go index f3a8b779a488..c476f0641392 100644 --- a/pkg/sql/colexec/colexecagg/hash_any_not_null_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_any_not_null_agg.eg.go @@ -13,7 +13,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" @@ -342,7 +342,7 @@ func (a *anyNotNullDecimalHashAgg) Compute( return } - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -388,7 +388,7 @@ func (a *anyNotNullDecimalHashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go index 75de3c0dc5f6..7bfbb4343a1f 100644 --- a/pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_avg_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -76,8 +75,7 @@ type avgInt16HashAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt16HashAgg{} @@ -85,10 +83,7 @@ var _ AggregateFunc = &avgInt16HashAgg{} func (a *avgInt16HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -104,9 +99,9 @@ func (a *avgInt16HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -124,9 +119,9 @@ func (a *avgInt16HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -138,7 +133,7 @@ func (a *avgInt16HashAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -194,8 +189,7 @@ type avgInt32HashAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt32HashAgg{} @@ -203,10 +197,7 @@ var _ AggregateFunc = &avgInt32HashAgg{} func (a *avgInt32HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -222,9 +213,9 @@ func (a *avgInt32HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -242,9 +233,9 @@ func (a *avgInt32HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -256,7 +247,7 @@ func (a *avgInt32HashAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -312,8 +303,7 @@ type avgInt64HashAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. 
- curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt64HashAgg{} @@ -321,10 +311,7 @@ var _ AggregateFunc = &avgInt64HashAgg{} func (a *avgInt64HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -340,9 +327,9 @@ func (a *avgInt64HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -360,9 +347,9 @@ func (a *avgInt64HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -374,7 +361,7 @@ func (a *avgInt64HashAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -438,7 +425,7 @@ var _ AggregateFunc = &avgDecimalHashAgg{} func (a *avgDecimalHashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -486,7 +473,7 @@ func (a *avgDecimalHashAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } diff --git a/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go index b57a08603496..5fa52970bffe 100644 --- a/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go @@ -15,7 +15,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -393,7 +393,7 @@ var _ AggregateFunc = &minDecimalHashAgg{} func (a *minDecimalHashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -455,7 +455,7 @@ func (a *minDecimalHashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -2042,7 +2042,7 @@ var _ AggregateFunc = &maxDecimalHashAgg{} func (a *maxDecimalHashAgg) 
Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -2104,7 +2104,7 @@ func (a *maxDecimalHashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/hash_sum_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_sum_agg.eg.go index d437340eab20..3e355c6e914e 100644 --- a/pkg/sql/colexec/colexecagg/hash_sum_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_sum_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -75,8 +74,7 @@ type sumInt16HashAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. - numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt16HashAgg{} @@ -84,10 +82,7 @@ var _ AggregateFunc = &sumInt16HashAgg{} func (a *sumInt16HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -103,9 +98,9 @@ func (a *sumInt16HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -123,9 +118,9 @@ func (a *sumInt16HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -137,7 +132,7 @@ func (a *sumInt16HashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -188,8 +183,7 @@ type sumInt32HashAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. 
- numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt32HashAgg{} @@ -197,10 +191,7 @@ var _ AggregateFunc = &sumInt32HashAgg{} func (a *sumInt32HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -216,9 +207,9 @@ func (a *sumInt32HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -236,9 +227,9 @@ func (a *sumInt32HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -250,7 +241,7 @@ func (a *sumInt32HashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -301,8 +292,7 @@ type sumInt64HashAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. - numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt64HashAgg{} @@ -310,10 +300,7 @@ var _ AggregateFunc = &sumInt64HashAgg{} func (a *sumInt64HashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -329,9 +316,9 @@ func (a *sumInt64HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -349,9 +336,9 @@ func (a *sumInt64HashAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -363,7 +350,7 @@ func (a *sumInt64HashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -422,7 +409,7 @@ var _ AggregateFunc = &sumDecimalHashAgg{} func (a *sumDecimalHashAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -470,7 +457,7 @@ func (a *sumDecimalHashAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/hash_sum_int_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_sum_int_agg.eg.go index 1ceffb1a41f9..c20a89e63842 100644 --- a/pkg/sql/colexec/colexecagg/hash_sum_int_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_sum_int_agg.eg.go @@ -12,7 +12,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" diff --git a/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go b/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go index 3111383f2d43..4cf955fc5c57 100644 --- a/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go +++ b/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go @@ -24,7 +24,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecagg/ordered_any_not_null_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_any_not_null_agg.eg.go index a73d0ba37173..ab587b5f479f 100644 --- a/pkg/sql/colexec/colexecagg/ordered_any_not_null_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_any_not_null_agg.eg.go @@ -13,7 +13,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" @@ -524,7 +524,7 @@ func (a 
*anyNotNullDecimalOrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -656,7 +656,7 @@ func (a *anyNotNullDecimalOrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/ordered_avg_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_avg_agg.eg.go index 8eb43d2d5670..872075b9b42c 100644 --- a/pkg/sql/colexec/colexecagg/ordered_avg_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_avg_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -78,8 +77,7 @@ type avgInt16OrderedAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt16OrderedAgg{} @@ -92,10 +90,7 @@ func (a *avgInt16OrderedAgg) SetOutput(vec coldata.Vec) { func (a *avgInt16OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -138,9 +133,9 @@ func (a *avgInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -180,9 +175,9 @@ func (a *avgInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -223,9 +218,9 @@ func (a *avgInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -263,9 +258,9 @@ func (a *avgInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -277,7 +272,7 @@ func (a *avgInt16OrderedAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -340,8 +335,7 @@ type avgInt32OrderedAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt32OrderedAgg{} @@ -354,10 +348,7 @@ func (a *avgInt32OrderedAgg) SetOutput(vec coldata.Vec) { func (a *avgInt32OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -400,9 +391,9 @@ func (a *avgInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -442,9 +433,9 @@ func (a *avgInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -485,9 +476,9 @@ func (a *avgInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -525,9 +516,9 @@ func (a *avgInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -539,7 +530,7 @@ func (a *avgInt32OrderedAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -602,8 +593,7 @@ type avgInt64OrderedAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt64OrderedAgg{} @@ -616,10 +606,7 @@ func (a *avgInt64OrderedAgg) SetOutput(vec coldata.Vec) { func (a *avgInt64OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -662,9 +649,9 @@ func (a *avgInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -704,9 +691,9 @@ func (a *avgInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -747,9 +734,9 @@ func (a *avgInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -787,9 +774,9 @@ func (a *avgInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -801,7 +788,7 @@ func (a *avgInt64OrderedAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -877,7 +864,7 @@ func (a *avgDecimalOrderedAgg) SetOutput(vec coldata.Vec) { func (a *avgDecimalOrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -1055,7 +1042,7 @@ func (a *avgDecimalOrderedAgg) Compute( } }, ) - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } diff --git a/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go index 6399abcfa111..8049701a4b68 100644 --- a/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go @@ -15,7 +15,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -684,7 +684,7 @@ func (a *minDecimalOrderedAgg) SetOutput(vec coldata.Vec) { func (a *minDecimalOrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, 
func() { @@ -868,7 +868,7 @@ func (a *minDecimalOrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -4001,7 +4001,7 @@ func (a *maxDecimalOrderedAgg) SetOutput(vec coldata.Vec) { func (a *maxDecimalOrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -4185,7 +4185,7 @@ func (a *maxDecimalOrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/ordered_sum_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_sum_agg.eg.go index a0cade17e013..61f1285e7ff3 100644 --- a/pkg/sql/colexec/colexecagg/ordered_sum_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_sum_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -77,8 +76,7 @@ type sumInt16OrderedAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. - numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt16OrderedAgg{} @@ -91,10 +89,7 @@ func (a *sumInt16OrderedAgg) SetOutput(vec coldata.Vec) { func (a *sumInt16OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -134,9 +129,9 @@ func (a *sumInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -172,9 +167,9 @@ func (a *sumInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -212,9 +207,9 @@ func (a *sumInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -248,9 +243,9 @@ func (a *sumInt16OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -262,7 +257,7 @@ func (a *sumInt16OrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -320,8 +315,7 @@ type sumInt32OrderedAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. - numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt32OrderedAgg{} @@ -334,10 +328,7 @@ func (a *sumInt32OrderedAgg) SetOutput(vec coldata.Vec) { func (a *sumInt32OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -377,9 +368,9 @@ func (a *sumInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -415,9 +406,9 @@ func (a *sumInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -455,9 +446,9 @@ func (a *sumInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -491,9 +482,9 @@ func (a *sumInt32OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -505,7 +496,7 @@ func (a *sumInt32OrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -563,8 +554,7 @@ type sumInt64OrderedAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. - numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt64OrderedAgg{} @@ -577,10 +567,7 @@ func (a *sumInt64OrderedAgg) SetOutput(vec coldata.Vec) { func (a *sumInt64OrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -620,9 +607,9 @@ func (a *sumInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -658,9 +645,9 @@ func (a *sumInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -698,9 +685,9 @@ func (a *sumInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -734,9 +721,9 @@ func (a *sumInt64OrderedAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -748,7 +735,7 @@ func (a *sumInt64OrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -819,7 +806,7 @@ func (a *sumDecimalOrderedAgg) SetOutput(vec coldata.Vec) { func (a *sumDecimalOrderedAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() a.allocator.PerformOperation([]coldata.Vec{a.vec}, func() { @@ -983,7 +970,7 @@ func (a *sumDecimalOrderedAgg) Compute( } }, ) - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/ordered_sum_int_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_sum_int_agg.eg.go index 8972b563e836..477f83ffe5c2 100644 --- a/pkg/sql/colexec/colexecagg/ordered_sum_int_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_sum_int_agg.eg.go @@ -12,7 +12,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" diff --git a/pkg/sql/colexec/colexecagg/sum_agg_tmpl.go b/pkg/sql/colexec/colexecagg/sum_agg_tmpl.go index 09d55011a5c4..1347a93f7b10 100644 --- a/pkg/sql/colexec/colexecagg/sum_agg_tmpl.go +++ b/pkg/sql/colexec/colexecagg/sum_agg_tmpl.go @@ -24,7 +24,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" 
"github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" @@ -98,15 +98,6 @@ type sum_SUMKIND_TYPE_AGGKINDAgg struct { // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. numNonNull uint64 - // {{if .NeedsHelper}} - // {{/* - // overloadHelper is used only when we perform the summation of integers - // and get a decimal result which is the case when {{if .NeedsHelper}} - // evaluates to true. In all other cases we don't want to wastefully - // allocate the helper. - // */}} - overloadHelper execgen.OverloadHelper - // {{end}} } var _ AggregateFunc = &sum_SUMKIND_TYPE_AGGKINDAgg{} @@ -122,17 +113,6 @@ func (a *sum_SUMKIND_TYPE_AGGKINDAgg) SetOutput(vec coldata.Vec) { func (a *sum_SUMKIND_TYPE_AGGKINDAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // {{if .NeedsHelper}} - // {{/* - // overloadHelper is used only when we perform the summation of integers - // and get a decimal result which is the case when {{if .NeedsHelper}} - // evaluates to true. In all other cases we don't want to wastefully - // allocate the helper. - // */}} - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - // {{end}} execgen.SETVARIABLESIZE(oldCurAggSize, a.curAgg) vec := vecs[inputIdxs[0]] col, nulls := vec.TemplateType(), vec.Nulls() @@ -253,11 +233,6 @@ func (a *sum_SUMKIND_TYPE_AGGKINDAggAlloc) newAggFunc() AggregateFunc { func (a *sum_SUMKIND_TYPE_AGGKINDAgg) Remove( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, ) { - // {{if .NeedsHelper}} - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - // {{end}} execgen.SETVARIABLESIZE(oldCurAggSize, a.curAgg) vec := vecs[inputIdxs[0]] col, nulls := vec.TemplateType(), vec.Nulls() diff --git a/pkg/sql/colexec/colexecagg/window_avg_agg.eg.go b/pkg/sql/colexec/colexecagg/window_avg_agg.eg.go index 8f455fce4d8e..a1bf6255f001 100644 --- a/pkg/sql/colexec/colexecagg/window_avg_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/window_avg_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -76,8 +75,7 @@ type avgInt16WindowAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt16WindowAgg{} @@ -85,10 +83,7 @@ var _ AggregateFunc = &avgInt16WindowAgg{} func (a *avgInt16WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -106,9 +101,9 @@ func (a *avgInt16WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -127,9 +122,9 @@ func (a *avgInt16WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -138,7 +133,7 @@ func (a *avgInt16WindowAgg) Compute( } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -189,10 +184,7 @@ func (a *avgInt16WindowAggAlloc) newAggFunc() AggregateFunc { // Remove implements the slidingWindowAggregateFunc interface (see // window_aggregator_tmpl.go). func (a *avgInt16WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -207,9 +199,9 @@ func (a *avgInt16WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -228,9 +220,9 @@ func (a *avgInt16WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -239,7 +231,7 @@ func (a *avgInt16WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -253,8 +245,7 @@ type avgInt32WindowAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. 
- curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt32WindowAgg{} @@ -262,10 +253,7 @@ var _ AggregateFunc = &avgInt32WindowAgg{} func (a *avgInt32WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -283,9 +271,9 @@ func (a *avgInt32WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -304,9 +292,9 @@ func (a *avgInt32WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -315,7 +303,7 @@ func (a *avgInt32WindowAgg) Compute( } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -366,10 +354,7 @@ func (a *avgInt32WindowAggAlloc) newAggFunc() AggregateFunc { // Remove implements the slidingWindowAggregateFunc interface (see // window_aggregator_tmpl.go). func (a *avgInt32WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -384,9 +369,9 @@ func (a *avgInt32WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -405,9 +390,9 @@ func (a *avgInt32WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -416,7 +401,7 @@ func (a *avgInt32WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -430,8 +415,7 @@ type avgInt64WindowAgg struct { curSum apd.Decimal // curCount keeps track of the number of non-null elements that we've seen // belonging to the current group. - curCount int64 - overloadHelper execgen.OverloadHelper + curCount int64 } var _ AggregateFunc = &avgInt64WindowAgg{} @@ -439,10 +423,7 @@ var _ AggregateFunc = &avgInt64WindowAgg{} func (a *avgInt64WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -460,9 +441,9 @@ func (a *avgInt64WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -481,9 +462,9 @@ func (a *avgInt64WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -492,7 +473,7 @@ func (a *avgInt64WindowAgg) Compute( } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -543,10 +524,7 @@ func (a *avgInt64WindowAggAlloc) newAggFunc() AggregateFunc { // Remove implements the slidingWindowAggregateFunc interface (see // window_aggregator_tmpl.go). 
func (a *avgInt64WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -561,9 +539,9 @@ func (a *avgInt64WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -582,9 +560,9 @@ func (a *avgInt64WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curSum, &a.curSum, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -593,7 +571,7 @@ func (a *avgInt64WindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, start } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -615,7 +593,7 @@ var _ AggregateFunc = &avgDecimalWindowAgg{} func (a *avgDecimalWindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -663,7 +641,7 @@ func (a *avgDecimalWindowAgg) Compute( } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } @@ -714,7 +692,7 @@ func (a *avgDecimalWindowAggAlloc) newAggFunc() AggregateFunc { // Remove implements the slidingWindowAggregateFunc interface (see // window_aggregator_tmpl.go). 
func (a *avgDecimalWindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int) { - oldCurSumSize := tree.SizeOfDecimal(&a.curSum) + oldCurSumSize := a.curSum.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -759,7 +737,7 @@ func (a *avgDecimalWindowAgg) Remove(vecs []coldata.Vec, inputIdxs []uint32, sta } } } - newCurSumSize := tree.SizeOfDecimal(&a.curSum) + newCurSumSize := a.curSum.Size() if newCurSumSize != oldCurSumSize { a.allocator.AdjustMemoryUsage(int64(newCurSumSize - oldCurSumSize)) } diff --git a/pkg/sql/colexec/colexecagg/window_min_max_agg.eg.go b/pkg/sql/colexec/colexecagg/window_min_max_agg.eg.go index 7d2a3419c29c..b561e5a9bc97 100644 --- a/pkg/sql/colexec/colexecagg/window_min_max_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/window_min_max_agg.eg.go @@ -15,7 +15,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -400,7 +400,7 @@ var _ AggregateFunc = &minDecimalWindowAgg{} func (a *minDecimalWindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -460,7 +460,7 @@ func (a *minDecimalWindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -2093,7 +2093,7 @@ var _ AggregateFunc = &maxDecimalWindowAgg{} func (a *maxDecimalWindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -2153,7 +2153,7 @@ func (a *maxDecimalWindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/window_sum_agg.eg.go b/pkg/sql/colexec/colexecagg/window_sum_agg.eg.go index c655f6f7a1b4..fb52a3530d63 100644 --- a/pkg/sql/colexec/colexecagg/window_sum_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/window_sum_agg.eg.go @@ -12,9 +12,8 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -75,8 +74,7 @@ type sumInt16WindowAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. 
- numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt16WindowAgg{} @@ -84,10 +82,7 @@ var _ AggregateFunc = &sumInt16WindowAgg{} func (a *sumInt16WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -105,9 +100,9 @@ func (a *sumInt16WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -126,9 +121,9 @@ func (a *sumInt16WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -137,7 +132,7 @@ func (a *sumInt16WindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -186,10 +181,7 @@ func (a *sumInt16WindowAggAlloc) newAggFunc() AggregateFunc { func (a *sumInt16WindowAgg) Remove( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int16(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -204,9 +196,9 @@ func (a *sumInt16WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -225,9 +217,9 @@ func (a *sumInt16WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -236,7 +228,7 @@ func (a *sumInt16WindowAgg) Remove( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -249,8 +241,7 @@ type sumInt32WindowAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. 
- numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt32WindowAgg{} @@ -258,10 +249,7 @@ var _ AggregateFunc = &sumInt32WindowAgg{} func (a *sumInt32WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -279,9 +267,9 @@ func (a *sumInt32WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -300,9 +288,9 @@ func (a *sumInt32WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -311,7 +299,7 @@ func (a *sumInt32WindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -360,10 +348,7 @@ func (a *sumInt32WindowAggAlloc) newAggFunc() AggregateFunc { func (a *sumInt32WindowAgg) Remove( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int32(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -378,9 +363,9 @@ func (a *sumInt32WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -399,9 +384,9 @@ func (a *sumInt32WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -410,7 +395,7 @@ func (a *sumInt32WindowAgg) Remove( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -423,8 +408,7 @@ type sumInt64WindowAgg struct { curAgg apd.Decimal // numNonNull tracks the number of non-null values we have seen for the group // that is currently being aggregated. 
- numNonNull uint64 - overloadHelper execgen.OverloadHelper + numNonNull uint64 } var _ AggregateFunc = &sumInt64WindowAgg{} @@ -432,10 +416,7 @@ var _ AggregateFunc = &sumInt64WindowAgg{} func (a *sumInt64WindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". - _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -453,9 +434,9 @@ func (a *sumInt64WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -474,9 +455,9 @@ func (a *sumInt64WindowAgg) Compute( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -485,7 +466,7 @@ func (a *sumInt64WindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -534,10 +515,7 @@ func (a *sumInt64WindowAggAlloc) newAggFunc() AggregateFunc { func (a *sumInt64WindowAgg) Remove( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". 
- _overloadHelper := a.overloadHelper - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Int64(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -552,9 +530,9 @@ func (a *sumInt64WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -573,9 +551,9 @@ func (a *sumInt64WindowAgg) Remove( { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(v)) - if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&a.curAgg, &a.curAgg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -584,7 +562,7 @@ func (a *sumInt64WindowAgg) Remove( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -605,7 +583,7 @@ var _ AggregateFunc = &sumDecimalWindowAgg{} func (a *sumDecimalWindowAgg) Compute( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, sel []int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() // Unnecessary memory accounting can have significant overhead for window @@ -653,7 +631,7 @@ func (a *sumDecimalWindowAgg) Compute( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } @@ -702,7 +680,7 @@ func (a *sumDecimalWindowAggAlloc) newAggFunc() AggregateFunc { func (a *sumDecimalWindowAgg) Remove( vecs []coldata.Vec, inputIdxs []uint32, startIdx, endIdx int, ) { - oldCurAggSize := tree.SizeOfDecimal(&a.curAgg) + oldCurAggSize := a.curAgg.Size() vec := vecs[inputIdxs[0]] col, nulls := vec.Decimal(), vec.Nulls() _, _ = col.Get(endIdx-1), col.Get(startIdx) @@ -747,7 +725,7 @@ func (a *sumDecimalWindowAgg) Remove( } } } - newCurAggSize := tree.SizeOfDecimal(&a.curAgg) + newCurAggSize := a.curAgg.Size() if newCurAggSize != oldCurAggSize { a.allocator.AdjustMemoryUsage(int64(newCurAggSize - oldCurAggSize)) } diff --git a/pkg/sql/colexec/colexecagg/window_sum_int_agg.eg.go b/pkg/sql/colexec/colexecagg/window_sum_int_agg.eg.go index 051508590d0b..5fb7edf555fe 100644 --- a/pkg/sql/colexec/colexecagg/window_sum_int_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/window_sum_int_agg.eg.go @@ -12,7 +12,7 @@ package colexecagg import ( "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" diff --git a/pkg/sql/colexec/colexecargs/BUILD.bazel b/pkg/sql/colexec/colexecargs/BUILD.bazel index 34047e3c0b59..fe799c2cb279 100644 --- a/pkg/sql/colexec/colexecargs/BUILD.bazel +++ b/pkg/sql/colexec/colexecargs/BUILD.bazel @@ -29,5 +29,6 @@ go_test( name = "colexecargs_test", srcs = ["dep_test.go"], embed = [":colexecargs"], + tags = ["no-remote"], deps = ["//pkg/testutils/buildutil"], ) diff --git a/pkg/sql/colexec/colexecbase/BUILD.bazel b/pkg/sql/colexec/colexecbase/BUILD.bazel index 
a632b5cd3181..bc7be433178f 100644 --- a/pkg/sql/colexec/colexecbase/BUILD.bazel +++ b/pkg/sql/colexec/colexecbase/BUILD.bazel @@ -33,7 +33,7 @@ go_library( "//pkg/util/json", # keep "//pkg/util/log", "//pkg/util/uuid", # keep - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", "@com_github_lib_pq//oid", # keep ], @@ -50,6 +50,7 @@ go_test( "ordinality_test.go", "simple_project_test.go", ], + tags = ["no-remote"], deps = [ ":colexecbase", "//pkg/col/coldata", diff --git a/pkg/sql/colexec/colexecbase/cast.eg.go b/pkg/sql/colexec/colexecbase/cast.eg.go index 7f769c2579e7..51ef00038d5d 100644 --- a/pkg/sql/colexec/colexecbase/cast.eg.go +++ b/pkg/sql/colexec/colexecbase/cast.eg.go @@ -17,17 +17,15 @@ import ( "strings" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/lex" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" @@ -912,7 +910,7 @@ type castNativeToDatumOp struct { castOpBase scratch []tree.Datum - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ colexecop.ClosableOperator = &castNativeToDatumOp{} @@ -1692,8 +1690,6 @@ func (c *castDecimalBoolOp) Next() coldata.Batch { type castDecimalInt2Op struct { castOpBase - - overloadHelper execgen.OverloadHelper } var _ colexecop.ResettableOperator = &castDecimalInt2Op{} @@ -1705,9 +1701,6 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { if n == 0 { return coldata.ZeroBatch } - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". 
- _overloadHelper := c.overloadHelper sel := batch.Selection() inputVec := batch.ColVec(c.colIdx) outputVec := batch.ColVec(c.outputIdx) @@ -1737,8 +1730,8 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { var r int16 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -1776,8 +1769,8 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { var r int16 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -1818,8 +1811,8 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { var r int16 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -1857,8 +1850,8 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { var r int16 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -1889,8 +1882,6 @@ func (c *castDecimalInt2Op) Next() coldata.Batch { type castDecimalInt4Op struct { castOpBase - - overloadHelper execgen.OverloadHelper } var _ colexecop.ResettableOperator = &castDecimalInt4Op{} @@ -1902,9 +1893,6 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { if n == 0 { return coldata.ZeroBatch } - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". 
- _overloadHelper := c.overloadHelper sel := batch.Selection() inputVec := batch.ColVec(c.colIdx) outputVec := batch.ColVec(c.outputIdx) @@ -1934,8 +1922,8 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { var r int32 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -1973,8 +1961,8 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { var r int32 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2015,8 +2003,8 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { var r int32 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2054,8 +2042,8 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { var r int32 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2086,8 +2074,6 @@ func (c *castDecimalInt4Op) Next() coldata.Batch { type castDecimalIntOp struct { castOpBase - - overloadHelper execgen.OverloadHelper } var _ colexecop.ResettableOperator = &castDecimalIntOp{} @@ -2099,9 +2085,6 @@ func (c *castDecimalIntOp) Next() coldata.Batch { if n == 0 { return coldata.ZeroBatch } - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". 
- _overloadHelper := c.overloadHelper sel := batch.Selection() inputVec := batch.ColVec(c.colIdx) outputVec := batch.ColVec(c.outputIdx) @@ -2131,8 +2114,8 @@ func (c *castDecimalIntOp) Next() coldata.Batch { var r int64 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2164,8 +2147,8 @@ func (c *castDecimalIntOp) Next() coldata.Batch { var r int64 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2200,8 +2183,8 @@ func (c *castDecimalIntOp) Next() coldata.Batch { var r int64 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } @@ -2233,8 +2216,8 @@ func (c *castDecimalIntOp) Next() coldata.Batch { var r int64 { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &v) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &v) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/colexecbase/cast_tmpl.go b/pkg/sql/colexec/colexecbase/cast_tmpl.go index 0ad0de0c1bdb..16bca26afe2d 100644 --- a/pkg/sql/colexec/colexecbase/cast_tmpl.go +++ b/pkg/sql/colexec/colexecbase/cast_tmpl.go @@ -26,17 +26,15 @@ import ( "fmt" "math" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/lex" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" @@ -325,7 +323,7 @@ type castNativeToDatumOp struct { castOpBase scratch []tree.Datum - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ colexecop.ClosableOperator = &castNativeToDatumOp{} @@ -419,14 +417,6 @@ func setNativeToDatumCast( type cast_NAMEOp struct { castOpBase - - // {{if and (eq $fromFamily "types.DecimalFamily") (eq $toFamily "types.IntFamily")}} - // {{/* - // overloadHelper is used only when we perform the cast from decimals to - // ints. In all other cases we don't want to wastefully allocate the helper. - // */}} - overloadHelper execgen.OverloadHelper - // {{end}} } var _ colexecop.ResettableOperator = &cast_NAMEOp{} @@ -438,11 +428,6 @@ func (c *cast_NAMEOp) Next() coldata.Batch { if n == 0 { return coldata.ZeroBatch } - // {{if and (eq $fromFamily "types.DecimalFamily") (eq $toFamily "types.IntFamily")}} - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". 
- _overloadHelper := c.overloadHelper - // {{end}} sel := batch.Selection() inputVec := batch.ColVec(c.colIdx) outputVec := batch.ColVec(c.outputIdx) diff --git a/pkg/sql/colexec/colexecbase/const.eg.go b/pkg/sql/colexec/colexecbase/const.eg.go index df2b6998ee79..2af1aa9157c5 100644 --- a/pkg/sql/colexec/colexecbase/const.eg.go +++ b/pkg/sql/colexec/colexecbase/const.eg.go @@ -12,7 +12,7 @@ package colexecbase import ( "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" diff --git a/pkg/sql/colexec/colexecbase/const_tmpl.go b/pkg/sql/colexec/colexecbase/const_tmpl.go index 08e1ed3113cf..fe85e32befee 100644 --- a/pkg/sql/colexec/colexecbase/const_tmpl.go +++ b/pkg/sql/colexec/colexecbase/const_tmpl.go @@ -22,7 +22,7 @@ package colexecbase import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" diff --git a/pkg/sql/colexec/colexecbase/distinct.eg.go b/pkg/sql/colexec/colexecbase/distinct.eg.go index 83f311027de6..e54ba21b4995 100644 --- a/pkg/sql/colexec/colexecbase/distinct.eg.go +++ b/pkg/sql/colexec/colexecbase/distinct.eg.go @@ -16,7 +16,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexeccmp/BUILD.bazel b/pkg/sql/colexec/colexeccmp/BUILD.bazel index f137b6d2ebee..a4bebd7a0d2c 100644 --- a/pkg/sql/colexec/colexeccmp/BUILD.bazel +++ b/pkg/sql/colexec/colexeccmp/BUILD.bazel @@ -21,6 +21,7 @@ go_test( name = "colexeccmp_test", srcs = ["dep_test.go"], embed = [":colexeccmp"], + tags = ["no-remote"], deps = ["//pkg/testutils/buildutil"], ) diff --git a/pkg/sql/colexec/colexechash/BUILD.bazel b/pkg/sql/colexec/colexechash/BUILD.bazel index 4c3be4b6615c..7ce372d3b709 100644 --- a/pkg/sql/colexec/colexechash/BUILD.bazel +++ b/pkg/sql/colexec/colexechash/BUILD.bazel @@ -16,16 +16,15 @@ go_library( "//pkg/col/coldataext", # keep "//pkg/col/typeconv", # keep "//pkg/sql/colexec/colexecutils", - "//pkg/sql/colexec/execgen", "//pkg/sql/colexecerror", "//pkg/sql/colexecop", "//pkg/sql/colmem", "//pkg/sql/memsize", - "//pkg/sql/rowenc", "//pkg/sql/sem/tree", # keep "//pkg/sql/types", "//pkg/util/json", # keep "//pkg/util/randutil", + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", ], ) @@ -39,15 +38,14 @@ go_test( "main_test.go", ], embed = [":colexechash"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", "//pkg/settings/cluster", "//pkg/sql/colexec/colexecutils", - "//pkg/sql/colexec/execgen", "//pkg/sql/colmem", "//pkg/sql/execinfra", - "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/testutils/buildutil", diff --git a/pkg/sql/colexec/colexechash/hash_utils.eg.go b/pkg/sql/colexec/colexechash/hash_utils.eg.go index 8798522af590..bd3074128c08 100644 --- a/pkg/sql/colexec/colexechash/hash_utils.eg.go +++ b/pkg/sql/colexec/colexechash/hash_utils.eg.go @@ -14,13 +14,12 @@ import ( "reflect" "unsafe" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" 
"github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/json" @@ -34,6 +33,7 @@ var ( _ = coldataext.Hash _ json.JSON _ tree.Datum + _ apd.Context ) // rehash takes an element of a key (tuple representing a row of equality @@ -45,12 +45,8 @@ func rehash( nKeys int, sel []int, cancelChecker colexecutils.CancelChecker, - overloadHelper *execgen.OverloadHelper, - datumAlloc *rowenc.DatumAlloc, + datumAlloc *tree.DatumAlloc, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". - _overloadHelper := overloadHelper switch col.CanonicalTypeFamily() { case types.BoolFamily: switch col.Type().Width() { @@ -82,16 +78,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -105,7 +102,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -129,13 +125,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -149,7 +146,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -180,7 +176,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] @@ -200,7 +195,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -221,7 +215,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] @@ -238,7 +231,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -265,7 +257,7 @@ func rehash( // In order for equal decimals to hash to the same value we need to // remove the trailing zeroes if there are any. - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.Reduce(&v) b := []byte(tmpDec.String()) sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) @@ -274,23 +266,24 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) // In order for equal decimals to hash to the same value we need to // remove the trailing zeroes if there are any. 
- tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.Reduce(&v) b := []byte(tmpDec.String()) sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) @@ -299,7 +292,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -316,7 +308,7 @@ func rehash( // In order for equal decimals to hash to the same value we need to // remove the trailing zeroes if there are any. - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.Reduce(&v) b := []byte(tmpDec.String()) sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) @@ -325,20 +317,21 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) // In order for equal decimals to hash to the same value we need to // remove the trailing zeroes if there are any. - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.Reduce(&v) b := []byte(tmpDec.String()) sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) @@ -347,7 +340,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -378,16 +370,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -399,7 +392,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -421,13 +413,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -439,7 +432,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } case 32: @@ -467,16 +459,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -488,7 +481,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -510,13 +502,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -528,7 +521,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } case -1: @@ -557,16 +549,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. 
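The decimal branches of `rehash` keep the invariant called out in the comments above: numerically equal decimals must hash to the same bucket, so trailing zeroes are stripped with `Reduce` before the textual form is fed to the hash. A small sketch of just that normalization step follows; the hashing itself is elided, since `memhash` in the generated code consumes the returned bytes.

```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// hashableBytes mirrors the per-value preparation in rehash: reduce away
// trailing zeroes so that 1.500 and 1.5 produce identical bytes, then hand
// the string form to the hash function.
func hashableBytes(v *apd.Decimal) []byte {
	var tmpDec apd.Decimal
	tmpDec.Reduce(v)
	return []byte(tmpDec.String())
}

func main() {
	a, _, _ := apd.NewFromString("1.500")
	b, _, _ := apd.NewFromString("1.5")
	fmt.Println(string(hashableBytes(a)) == string(hashableBytes(b))) // true
}
```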
_ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -578,7 +571,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -600,13 +592,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -618,7 +611,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -652,16 +644,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -675,7 +668,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -699,13 +691,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -719,7 +712,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -750,16 +742,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -770,7 +763,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -791,13 +783,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -808,7 +801,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -841,16 +833,17 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i if nulls.NullAt(selIdx) { continue } + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -863,7 +856,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -886,13 +878,14 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. 
_ = buckets[nKeys-1] + _ = keys.Get(nKeys - 1) var selIdx int for i := 0; i < nKeys; i++ { selIdx = i + //gcassert:bce v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) @@ -905,7 +898,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -926,16 +918,12 @@ func rehash( if nulls.NullAt(selIdx) { continue } - v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) - scratch := _overloadHelper.ByteScratch[:0] - _b, _err := json.EncodeJSON(scratch, v) - if _err != nil { - colexecerror.ExpectedError(_err) - } - _overloadHelper.ByteScratch = _b + // Access the underlying []byte directly which allows us to skip + // decoding-encoding of the JSON object. + _b := keys.Bytes.Get(selIdx) sh := (*reflect.SliceHeader)(unsafe.Pointer(&_b)) p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(_b))) @@ -943,7 +931,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] @@ -953,16 +940,12 @@ func rehash( if nulls.NullAt(selIdx) { continue } - v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) - scratch := _overloadHelper.ByteScratch[:0] - _b, _err := json.EncodeJSON(scratch, v) - if _err != nil { - colexecerror.ExpectedError(_err) - } - _overloadHelper.ByteScratch = _b + // Access the underlying []byte directly which allows us to skip + // decoding-encoding of the JSON object. + _b := keys.Bytes.Get(selIdx) sh := (*reflect.SliceHeader)(unsafe.Pointer(&_b)) p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(_b))) @@ -970,7 +953,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -981,16 +963,12 @@ func rehash( for i := 0; i < nKeys; i++ { //gcassert:bce selIdx = sel[i] - v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) - scratch := _overloadHelper.ByteScratch[:0] - _b, _err := json.EncodeJSON(scratch, v) - if _err != nil { - colexecerror.ExpectedError(_err) - } - _overloadHelper.ByteScratch = _b + // Access the underlying []byte directly which allows us to skip + // decoding-encoding of the JSON object. + _b := keys.Bytes.Get(selIdx) sh := (*reflect.SliceHeader)(unsafe.Pointer(&_b)) p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(_b))) @@ -998,23 +976,18 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] var selIdx int for i := 0; i < nKeys; i++ { selIdx = i - v := keys.Get(selIdx) //gcassert:bce p := uintptr(buckets[i]) - scratch := _overloadHelper.ByteScratch[:0] - _b, _err := json.EncodeJSON(scratch, v) - if _err != nil { - colexecerror.ExpectedError(_err) - } - _overloadHelper.ByteScratch = _b + // Access the underlying []byte directly which allows us to skip + // decoding-encoding of the JSON object. + _b := keys.Bytes.Get(selIdx) sh := (*reflect.SliceHeader)(unsafe.Pointer(&_b)) p = memhash(unsafe.Pointer(sh.Data), p, uintptr(len(_b))) @@ -1022,7 +995,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } @@ -1053,7 +1025,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. 
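The JSON branch above stops decoding each datum only to re-encode it into the shared `ByteScratch` buffer; it now hashes the column's stored byte representation directly via `keys.Bytes.Get`. Below is a hedged sketch of the idea with a hypothetical bytes-backed column; the real `coldata` vector and `memhash` are not reproduced here, and FNV stands in for the hash.

```
package main

import (
	"fmt"
	"hash/fnv"
)

// jsonColumn is a hypothetical stand-in for a bytes-backed JSON vector: each
// row's JSON value is kept in its already-encoded form.
type jsonColumn struct {
	rows [][]byte
}

// hashRow hashes the stored bytes directly, skipping the decode/re-encode
// round trip that the old generated code performed per row.
func (c *jsonColumn) hashRow(i int, seed uint64) uint64 {
	h := fnv.New64a()
	var b [8]byte
	for k := 0; k < 8; k++ {
		b[k] = byte(seed >> (8 * k))
	}
	h.Write(b[:])
	h.Write(c.rows[i])
	return h.Sum64()
}

func main() {
	col := &jsonColumn{rows: [][]byte{[]byte(`{"a": 1}`), []byte(`[1, 2, 3]`)}}
	fmt.Println(col.hashRow(0, 1), col.hashRow(1, 1))
}
```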
_ = buckets[nKeys-1] @@ -1073,7 +1044,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } else { if sel != nil { @@ -1094,7 +1064,6 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } else { // Early bounds checks. _ = buckets[nKeys-1] @@ -1111,11 +1080,11 @@ func rehash( //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() } } } default: colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", col.Type())) } + cancelChecker.CheckEveryCall() } diff --git a/pkg/sql/colexec/colexechash/hash_utils.go b/pkg/sql/colexec/colexechash/hash_utils.go index af823877b3cf..1515f514aa71 100644 --- a/pkg/sql/colexec/colexechash/hash_utils.go +++ b/pkg/sql/colexec/colexechash/hash_utils.go @@ -15,8 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // initHash, rehash, and finalizeHash work together to compute the hash value @@ -117,9 +116,8 @@ type TupleHashDistributor struct { selections [][]int // cancelChecker is used during the hashing of the rows to distribute to // check for query cancellation. - cancelChecker colexecutils.CancelChecker - overloadHelper execgen.OverloadHelper - datumAlloc rowenc.DatumAlloc + cancelChecker colexecutils.CancelChecker + datumAlloc tree.DatumAlloc } // NewTupleHashDistributor returns a new TupleHashDistributor. @@ -157,7 +155,7 @@ func (d *TupleHashDistributor) Distribute(b coldata.Batch, hashCols []uint32) [] } for _, i := range hashCols { - rehash(d.buckets, b.ColVec(int(i)), n, b.Selection(), d.cancelChecker, &d.overloadHelper, &d.datumAlloc) + rehash(d.buckets, b.ColVec(int(i)), n, b.Selection(), d.cancelChecker, &d.datumAlloc) } finalizeHash(d.buckets, n, uint64(len(d.selections))) diff --git a/pkg/sql/colexec/colexechash/hash_utils_test.go b/pkg/sql/colexec/colexechash/hash_utils_test.go index d565ef9f8b39..4042b4e1ec2e 100644 --- a/pkg/sql/colexec/colexechash/hash_utils_test.go +++ b/pkg/sql/colexec/colexechash/hash_utils_test.go @@ -17,8 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -41,9 +40,8 @@ func TestHashFunctionFamily(t *testing.T) { } numBuckets := uint64(16) var ( - cancelChecker colexecutils.CancelChecker - overloadHelperVar execgen.OverloadHelper - datumAlloc rowenc.DatumAlloc + cancelChecker colexecutils.CancelChecker + datumAlloc tree.DatumAlloc ) cancelChecker.Init(context.Background()) @@ -51,7 +49,7 @@ func TestHashFunctionFamily(t *testing.T) { // We need +1 here because 0 is not a valid initial hash value. 
initHash(buckets, nKeys, uint64(initHashValue+1)) for _, keysCol := range keys { - rehash(buckets, keysCol, nKeys, nil /* sel */, cancelChecker, &overloadHelperVar, &datumAlloc) + rehash(buckets, keysCol, nKeys, nil /* sel */, cancelChecker, &datumAlloc) } finalizeHash(buckets, nKeys, numBuckets) } diff --git a/pkg/sql/colexec/colexechash/hash_utils_tmpl.go b/pkg/sql/colexec/colexechash/hash_utils_tmpl.go index fa96d29fcc58..e9adacf629d8 100644 --- a/pkg/sql/colexec/colexechash/hash_utils_tmpl.go +++ b/pkg/sql/colexec/colexechash/hash_utils_tmpl.go @@ -22,13 +22,12 @@ package colexechash import ( + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/json" @@ -42,6 +41,7 @@ var ( _ = coldataext.Hash _ json.JSON _ tree.Datum + _ apd.Context ) // {{/* @@ -78,7 +78,7 @@ func _REHASH_BODY( _ = buckets[nKeys-1] // {{if .HasSel}} _ = sel[nKeys-1] - // {{else if .Sliceable}} + // {{else if .Global.Sliceable}} _ = keys.Get(nKeys - 1) // {{end}} var selIdx int @@ -94,17 +94,22 @@ func _REHASH_BODY( continue } // {{end}} - // {{if .Sliceable}} + // {{if not (eq .Global.VecMethod "JSON")}} + // {{/* + // No need to decode the JSON value (which is done in Get) since + // we'll be operating directly on the underlying []byte. + // */}} + // {{if and (not .HasSel) .Global.Sliceable}} //gcassert:bce // {{end}} v := keys.Get(selIdx) + // {{end}} //gcassert:bce p := uintptr(buckets[i]) - _ASSIGN_HASH(p, v, _, keys) + _ASSIGN_HASH(p, v, keys, selIdx) //gcassert:bce buckets[i] = uint64(p) } - cancelChecker.CheckEveryCall() // {{end}} // {{/* @@ -121,12 +126,8 @@ func rehash( nKeys int, sel []int, cancelChecker colexecutils.CancelChecker, - overloadHelper *execgen.OverloadHelper, - datumAlloc *rowenc.DatumAlloc, + datumAlloc *tree.DatumAlloc, ) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "execgen.OverloadHelper". 
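Two smaller changes run through this hash-utils rewrite: `rehash` loses its `*execgen.OverloadHelper` parameter (its decimal scratch is now the stack temporary shown earlier) and takes a `*tree.DatumAlloc`, and the per-branch `cancelChecker.CheckEveryCall()` calls collapse into a single call after the type switch. A hedged sketch of that control-flow shape, with a hypothetical `CancelChecker` that is not the real `colexecutils` implementation:

```
package main

import "fmt"

// CancelChecker is a hypothetical stand-in for colexecutils.CancelChecker: it
// only counts how often the query-cancellation check runs.
type CancelChecker struct{ calls int }

func (c *CancelChecker) CheckEveryCall() { c.calls++ }

// rehashSketch shows the shape of the new rehash: each type family hashes its
// values in its own branch, and the cancellation check happens exactly once
// per call rather than once per branch copy.
func rehashSketch(buckets []uint64, keys interface{}, cancel *CancelChecker) {
	switch k := keys.(type) {
	case []int64:
		for i, v := range k {
			buckets[i] = buckets[i]*31 + uint64(v)
		}
	case []bool:
		for i, v := range k {
			buckets[i] *= 31
			if v {
				buckets[i]++
			}
		}
	}
	// Previously every branch above ended with its own CheckEveryCall; now the
	// function checks for cancellation once on the way out.
	cancel.CheckEveryCall()
}

func main() {
	var cc CancelChecker
	buckets := make([]uint64, 3)
	rehashSketch(buckets, []int64{7, 8, 9}, &cc)
	fmt.Println(buckets, cc.calls) // hashed buckets, 1
}
```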
- _overloadHelper := overloadHelper switch col.CanonicalTypeFamily() { // {{range .}} case _CANONICAL_TYPE_FAMILY: @@ -153,4 +154,5 @@ func rehash( default: colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", col.Type())) } + cancelChecker.CheckEveryCall() } diff --git a/pkg/sql/colexec/colexechash/hashtable.go b/pkg/sql/colexec/colexechash/hashtable.go index 4cce95437949..9c24aba1a91a 100644 --- a/pkg/sql/colexec/colexechash/hashtable.go +++ b/pkg/sql/colexec/colexechash/hashtable.go @@ -15,12 +15,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/memsize" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) @@ -182,9 +181,8 @@ type HashTable struct { // each other. allowNullEquality bool - overloadHelper execgen.OverloadHelper - datumAlloc rowenc.DatumAlloc - cancelChecker colexecutils.CancelChecker + datumAlloc tree.DatumAlloc + cancelChecker colexecutils.CancelChecker BuildMode HashTableBuildMode probeMode HashTableProbeMode @@ -520,6 +518,7 @@ func (ht *HashTable) checkCols(probeVecs []coldata.Vec, nToCheck uint64, probeSe // checkColsForDistinctTuples performs a column by column check to find distinct // tuples in the probe table that are not present in the build table. +// NOTE: It assumes that probeSel has already been populated and it is not nil. func (ht *HashTable) checkColsForDistinctTuples( probeVecs []coldata.Vec, nToCheck uint64, probeSel []int, ) { @@ -552,7 +551,7 @@ func (ht *HashTable) ComputeBuckets(buckets []uint64, keys []coldata.Vec, nKeys } for i := range ht.keyCols { - rehash(buckets, keys[i], nKeys, sel, ht.cancelChecker, &ht.overloadHelper, &ht.datumAlloc) + rehash(buckets, keys[i], nKeys, sel, ht.cancelChecker, &ht.datumAlloc) } finalizeHash(buckets, nKeys, ht.numBuckets) diff --git a/pkg/sql/colexec/colexechash/hashtable_distinct.eg.go b/pkg/sql/colexec/colexechash/hashtable_distinct.eg.go index 195a5b2cdde1..a32cd23e2cf5 100644 --- a/pkg/sql/colexec/colexechash/hashtable_distinct.eg.go +++ b/pkg/sql/colexec/colexechash/hashtable_distinct.eg.go @@ -30,9 +30,10 @@ var ( _ tree.AggType ) -// checkColAgainstItself is similar to checkCol, but it probes the vector -// against itself. -func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel []int) { +// checkColAgainstItselfForDistinct is similar to checkCol, but it probes the +// vector against itself for the purposes of finding matches to unordered +// distinct columns. +func (ht *HashTable) checkColAgainstItselfForDistinct(vec coldata.Vec, nToCheck uint64, sel []int) { probeVec, buildVec, probeSel := vec, vec, sel switch probeVec.CanonicalTypeFamily() { case types.BoolFamily: @@ -56,58 +57,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. 
keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -118,57 +116,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. 
keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -181,57 +176,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -242,56 +234,53 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. 
- buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -306,56 +295,53 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -366,55 +352,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -427,55 +410,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
- { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -486,54 +466,51 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. 
This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -563,50 +540,47 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. 
This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -617,49 +591,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -672,49 +643,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -725,48 +693,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -781,48 +746,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -833,47 +795,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -886,47 +845,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -937,46 +893,43 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1006,50 +959,47 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1060,49 +1010,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1115,49 +1062,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1168,48 +1112,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1224,48 +1165,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1276,47 +1214,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1329,47 +1264,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1380,46 +1312,43 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1447,61 +1376,58 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
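The decimal hunks above differ from the `[]byte` ones only in the comparison they inline, `tree.CompareDecimals`. As a rough stand-in, assuming only the sign of the result matters and using the `apd` package's `Cmp` method rather than the `tree` helper:

```go
package main

import (
	"fmt"

	apd "github.com/cockroachdb/apd/v3"
)

func main() {
	// Hypothetical probe/build values; the generated code pulls these out of
	// the probe and build key vectors instead.
	probeVal := apd.New(125, -2)  // 1.25
	buildVal := apd.New(1250, -3) // 1.250
	cmpResult := probeVal.Cmp(buildVal)
	unique := cmpResult != 0
	fmt.Println(cmpResult, unique) // 0 false: numerically equal, so not unique
}
```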
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1512,60 +1438,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1578,60 +1501,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1642,59 +1562,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1709,59 +1626,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1772,58 +1686,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
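For integer keys the template inlines a three-way comparison instead of calling a helper; extracted into a plain function (the name is illustrative), it is just:

```go
package main

import "fmt"

// compareInt64 reproduces the inlined comparison from the integer hunks; the
// generated code only ever tests whether the result is non-zero ("unique").
func compareInt64(a, b int64) int {
	if a < b {
		return -1
	} else if a > b {
		return 1
	}
	return 0
}

func main() {
	for _, pair := range [][2]int64{{1, 2}, {2, 1}, {3, 3}} {
		cmpResult := compareInt64(pair[0], pair[1])
		fmt.Println(pair, cmpResult, cmpResult != 0)
	}
}
```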
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1836,58 +1747,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -1898,57 +1806,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -1973,61 +1878,58 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2038,60 +1940,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2104,60 +2003,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2168,59 +2064,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2235,59 +2128,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2298,58 +2188,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2362,58 +2249,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2424,57 +2308,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2501,61 +2382,58 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2566,60 +2444,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2632,60 +2507,57 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2696,59 +2568,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2763,59 +2632,56 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2826,58 +2692,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -2890,58 +2753,55 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -2952,57 +2812,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3032,69 +2889,66 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3105,68 +2959,65 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3179,68 +3030,65 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3251,67 +3099,64 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3326,67 +3171,64 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3397,66 +3239,63 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3469,66 +3308,63 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3539,65 +3375,62 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } } else { - cmpResult = 1 + cmpResult = -1 } + } else { + cmpResult = 1 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3627,57 +3460,54 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. 
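The float branches above use a NaN-aware ordering so the comparison always yields -1, 0, or 1 instead of being undefined for NaN inputs. A self-contained sketch of that ordering (the function name is illustrative): NaNs compare equal to each other and sort before every non-NaN value.

```go
package main

import (
	"fmt"
	"math"
)

// compareFloats mirrors the float comparison in the generated code above:
// NaN values compare equal to each other and sort before all non-NaN values,
// so the result is always a total order.
func compareFloats(a, b float64) int {
	if a < b {
		return -1
	} else if a > b {
		return 1
	} else if a == b {
		return 0
	} else if math.IsNaN(a) {
		if math.IsNaN(b) {
			return 0
		}
		return -1
	}
	// a is an ordinary number and b is NaN, so a sorts after b.
	return 1
}

func main() {
	fmt.Println(compareFloats(1.0, math.NaN()))        // 1: NaN sorts first
	fmt.Println(compareFloats(math.NaN(), math.NaN())) // 0: NaNs treated as equal
}
```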
This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3688,56 +3518,53 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3750,56 +3577,53 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3810,55 +3634,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3873,55 +3694,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -3932,54 +3750,51 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -3992,54 +3807,51 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4050,53 +3862,50 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4126,50 +3935,47 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. 
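The timestamp branches reduce the comparison to two `Before` calls: the values are considered different only if one strictly precedes the other. A standalone sketch with `time.Time` (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// compareTimes mirrors the timestamp branch above: -1 if probeVal is strictly
// earlier, 1 if buildVal is strictly earlier, 0 when neither precedes the other.
func compareTimes(probeVal, buildVal time.Time) int {
	if probeVal.Before(buildVal) {
		return -1
	} else if buildVal.Before(probeVal) {
		return 1
	}
	return 0
}

func main() {
	t := time.Now()
	fmt.Println(compareTimes(t, t.Add(time.Second))) // -1
	fmt.Println(compareTimes(t, t) != 0)             // false: equal, not unique
}
```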
This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4180,49 +3986,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4235,49 +4038,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4288,48 +4088,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4344,48 +4141,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4396,47 +4190,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4449,47 +4240,44 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4500,46 +4288,43 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4569,56 +4354,53 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. 
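The branches in this part of the hunk delegate to a `Compare` method that returns an int directly, with no error path. A minimal sketch of that shape, using an illustrative wrapper type rather than the real column type:

```go
package main

import "fmt"

// interval is an illustrative stand-in for a column type whose Compare
// method returns an int directly, mirroring the pattern in the hunk above.
type interval struct{ nanos int64 }

func (i interval) Compare(other interval) int {
	switch {
	case i.nanos < other.nanos:
		return -1
	case i.nanos > other.nanos:
		return 1
	default:
		return 0
	}
}

func main() {
	probeVal, buildVal := interval{nanos: 10}, interval{nanos: 20}
	unique := probeVal.Compare(buildVal) != 0
	fmt.Println(unique) // true: the values differ
}
```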
keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4629,55 +4411,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4690,55 +4469,52 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. 
- buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4749,54 +4525,51 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. 
- continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4811,54 +4584,51 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4869,53 +4639,50 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -4928,53 +4695,50 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
- { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -4985,52 +4749,49 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. 
This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -5060,52 +4821,49 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -5116,51 +4874,48 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -5173,51 +4928,48 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -5228,50 +4980,47 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - // The vector is probed against itself, so buildVec has the same - // selection vector as probeVec. - buildIdx = probeSel[keyID-1] - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = probeSel[toCheck] + // The vector is probed against itself, so buildVec has the same + // selection vector as probeVec. + buildIdx = probeSel[keyID-1] + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -5286,50 +5035,47 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -5340,49 +5086,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -5395,49 +5138,46 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } else { @@ -5448,48 +5188,45 @@ func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. - { - var cmpResult int + probeIdx = int(toCheck) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } } } @@ -5516,982 +5253,1536 @@ func (ht *HashTable) checkColForDistinctTuples( default: probeKeys := probeVec.Bool() buildKeys := buildVec.Bool() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
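
The `checkColForDistinctTuples` hunks below all apply the same template change: a NULL probing value no longer resets `GroupID[toCheck]`, and a tuple whose `keyID` is 0 (end of the hash chain) is marked distinct in an `else` branch instead of a separate trailing `if keyID == 0` check. A minimal, self-contained sketch of that loop shape, using hypothetical stand-in types (`probeScratch`, plain `[]int64` keys, `[]bool` null masks) in place of the real `coldata` vectors and selection handling:

```go
// Simplified sketch of the probe-check loop that the generated hunks expand
// per type family. Names and types here are illustrative stand-ins, not the
// generated code itself.
package main

import "fmt"

type probeScratch struct {
	GroupID  []uint64 // 0 means "end of chain": no build-side candidate.
	ToCheck  []uint64
	differs  []bool
	distinct []bool
}

func checkCol(
	ps *probeScratch,
	probeKeys, buildKeys []int64,
	probeNulls, buildNulls []bool,
	allowNullEquality bool,
) {
	for _, toCheck := range ps.ToCheck {
		keyID := ps.GroupID[toCheck]
		if keyID == 0 {
			// No candidate in the hash chain: the tuple is distinct.
			ps.distinct[toCheck] = true
			continue
		}
		probeIsNull := probeNulls[toCheck]
		buildIsNull := buildNulls[keyID-1]
		if allowNullEquality {
			if probeIsNull && buildIsNull {
				continue // NULL == NULL under null equality.
			} else if probeIsNull {
				// Mark as different without resetting GroupID.
				ps.differs[toCheck] = true
				continue
			}
		}
		if probeIsNull {
			// NULL never equals anything: mark distinct. The GroupID reset
			// present in the old generated code is gone.
			ps.distinct[toCheck] = true
		} else if buildIsNull {
			ps.differs[toCheck] = true
		} else if probeKeys[toCheck] != buildKeys[keyID-1] {
			ps.differs[toCheck] = true
		}
	}
}

func main() {
	ps := &probeScratch{
		GroupID:  []uint64{1, 0, 2},
		ToCheck:  []uint64{0, 1, 2},
		differs:  make([]bool, 3),
		distinct: make([]bool, 3),
	}
	probe := []int64{10, 20, 30}
	build := []int64{10, 31}
	checkCol(ps, probe, build, make([]bool, 3), make([]bool, 2), false)
	fmt.Println(ps.differs, ps.distinct) // [false false true] [false true false]
}
```

Keeping `differs` and `distinct` as separate flags presumably lets the caller keep walking the hash chain for tuples that merely differ while finalizing tuples already known to be distinct; the sketch only shows how the flags are set.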
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } + + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int - unique = cmpResult != 0 + if !probeVal && buildVal { + cmpResult = -1 + } else if probeVal && !buildVal { + cmpResult = 1 + } else { + cmpResult = 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + } + } + } + } + } + } + case types.BytesFamily: + switch probeVec.Type().Width() { + case -1: + default: + switch probeVec.CanonicalTypeFamily() { + case types.BytesFamily: + switch probeVec.Type().Width() { + case -1: + default: + probeKeys := probeVec.Bytes() + buildKeys := buildVec.Bytes() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - if !probeVal && buildVal { - cmpResult = -1 - } else if probeVal && !buildVal { - cmpResult = 1 - } else { - cmpResult = 0 - } + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 + } - unique = cmpResult != 0 - } + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = bytes.Compare(probeVal, buildVal) + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } + } + } + } + } + case types.DecimalFamily: + switch probeVec.Type().Width() { + case -1: + default: + switch probeVec.CanonicalTypeFamily() { + case types.IntFamily: + switch probeVec.Type().Width() { + } + case types.FloatFamily: + switch probeVec.Type().Width() { + } + case types.DecimalFamily: + switch probeVec.Type().Width() { + case -1: + default: + probeKeys := probeVec.Decimal() + buildKeys := buildVec.Decimal() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. 
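
The Decimal branches call `tree.CompareDecimals` for the inner comparison. As a rough standalone stand-in, the apd library's `Cmp` gives the same three-way result; this sketch assumes the `cockroachdb/apd/v3` module, and `decimalsDiffer` is a hypothetical helper:

```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// decimalsDiffer approximates the Decimal-family branch: the keys differ
// when the three-way decimal comparison is non-zero.
func decimalsDiffer(probeVal, buildVal *apd.Decimal) bool {
	return probeVal.Cmp(buildVal) != 0
}

func main() {
	a, _, _ := apd.NewFromString("1.50")
	b, _, _ := apd.NewFromString("1.5")
	fmt.Println(decimalsDiffer(a, b)) // false: numerically equal
}
```
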
+ keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 + } + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). 
We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = tree.CompareDecimals(&probeVal, &buildVal) + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } + } + } + } + case types.IntFamily: + switch probeVec.Type().Width() { + case 16: + switch probeVec.CanonicalTypeFamily() { + case types.IntFamily: + switch probeVec.Type().Width() { + case 16: + probeKeys := probeVec.Int16() + buildKeys := buildVec.Int16() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int - if !probeVal && buildVal { + { + a, b := int64(probeVal), int64(buildVal) + if a < b { cmpResult = -1 - } else if probeVal && !buildVal { + } else if a > b { cmpResult = 1 } else { cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
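
The Int16, Int32, and Int64 branches all widen both operands to `int64` before the three-way comparison, so every integer width reuses the same comparison body. A small sketch of that inner comparison (`intsDiffer` is a hypothetical helper):

```
package main

import "fmt"

// intsDiffer mirrors the Int-family branches: both values are widened to
// int64 and compared three ways; a non-zero result marks the pair as
// differing.
func intsDiffer(probeVal, buildVal int16) bool {
	a, b := int64(probeVal), int64(buildVal)
	cmpResult := 0
	if a < b {
		cmpResult = -1
	} else if a > b {
		cmpResult = 1
	}
	return cmpResult != 0
}

func main() {
	fmt.Println(intsDiffer(7, 7))  // false
	fmt.Println(intsDiffer(7, -1)) // true
}
```
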
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if !probeVal && buildVal { + { + a, b := int64(probeVal), int64(buildVal) + if a < b { cmpResult = -1 - } else if probeVal && !buildVal { + } else if a > b { cmpResult = 1 } else { cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if !probeVal && buildVal { + { + a, b := int64(probeVal), int64(buildVal) + if a < b { cmpResult = -1 - } else if probeVal && !buildVal { + } else if a > b { cmpResult = 1 } else { cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. 
+ continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - { - var cmpResult int + { + var cmpResult int - if !probeVal && buildVal { + { + a, b := int64(probeVal), int64(buildVal) + if a < b { cmpResult = -1 - } else if probeVal && !buildVal { + } else if a > b { cmpResult = 1 } else { cmpResult = 0 } - - unique = cmpResult != 0 } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } } + case types.FloatFamily: + switch probeVec.Type().Width() { + } + case types.DecimalFamily: + switch probeVec.Type().Width() { + } } - } - case types.BytesFamily: - switch probeVec.Type().Width() { - case -1: - default: + case 32: switch probeVec.CanonicalTypeFamily() { - case types.BytesFamily: + case types.IntFamily: switch probeVec.Type().Width() { - case -1: - default: - probeKeys := probeVec.Bytes() - buildKeys := buildVec.Bytes() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + case 32: + probeKeys := probeVec.Int32() + buildKeys := buildVec.Int32() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } + } + } + case types.FloatFamily: + switch probeVec.Type().Width() { + } + case types.DecimalFamily: + switch probeVec.Type().Width() { + } + } + case -1: + default: + switch probeVec.CanonicalTypeFamily() { + case types.IntFamily: + switch probeVec.Type().Width() { + case -1: + default: + probeKeys := probeVec.Int64() + buildKeys := buildVec.Int64() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). 
We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = bytes.Compare(probeVal, buildVal) - unique = cmpResult != 0 + a, b := int64(probeVal), int64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else { + cmpResult = 0 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } } + case types.FloatFamily: + switch probeVec.Type().Width() { + } + case types.DecimalFamily: + switch probeVec.Type().Width() { + } } } - case types.DecimalFamily: + case types.FloatFamily: switch probeVec.Type().Width() { case -1: default: @@ -6500,4698 +6791,1270 @@ func (ht *HashTable) checkColForDistinctTuples( switch probeVec.Type().Width() { } case types.FloatFamily: - switch probeVec.Type().Width() { - } - case types.DecimalFamily: switch probeVec.Type().Width() { case -1: default: - probeKeys := probeVec.Decimal() - buildKeys := buildVec.Decimal() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + probeKeys := probeVec.Float64() + buildKeys := buildVec.Float64() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { + cmpResult = 0 + } else { + cmpResult = -1 + } + } else { + cmpResult = 1 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { + cmpResult = 0 + } else { + cmpResult = -1 + } + } else { + cmpResult = 1 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. 
+ keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + + { + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { + cmpResult = 0 + } else { + cmpResult = -1 + } + } else { + cmpResult = 1 + } + } + + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = tree.CompareDecimals(&probeVal, &buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - } - } - case types.IntFamily: - switch probeVec.Type().Width() { - case 16: - switch probeVec.CanonicalTypeFamily() { - case types.IntFamily: - switch probeVec.Type().Width() { - case 16: - probeKeys := probeVec.Int16() - buildKeys := buildVec.Int16() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). 
We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. 
- continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - case types.FloatFamily: - switch probeVec.Type().Width() { - } - case types.DecimalFamily: - switch probeVec.Type().Width() { - } - } - case 32: - switch probeVec.CanonicalTypeFamily() { - case types.IntFamily: - switch probeVec.Type().Width() { - case 32: - probeKeys := probeVec.Int32() - buildKeys := buildVec.Int32() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - case types.FloatFamily: - switch probeVec.Type().Width() { - } - case types.DecimalFamily: - switch probeVec.Type().Width() { - } - } - case -1: - default: - switch probeVec.CanonicalTypeFamily() { - case types.IntFamily: - switch probeVec.Type().Width() { - case -1: - default: - probeKeys := probeVec.Int64() - buildKeys := buildVec.Int64() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := int64(probeVal), int64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else { - cmpResult = 0 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - case types.FloatFamily: - switch probeVec.Type().Width() { - } - case types.DecimalFamily: - switch probeVec.Type().Width() { - } - } - } - case types.FloatFamily: - switch probeVec.Type().Width() { - case -1: - default: - switch probeVec.CanonicalTypeFamily() { - case types.IntFamily: - switch probeVec.Type().Width() { - } - case types.FloatFamily: - switch probeVec.Type().Width() { - case -1: - default: - probeKeys := probeVec.Float64() - buildKeys := buildVec.Float64() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. 
- continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - { - a, b := float64(probeVal), float64(buildVal) - if a < b { - cmpResult = -1 - } else if a > b { - cmpResult = 1 - } else if a == b { - cmpResult = 0 - } else if math.IsNaN(a) { - if math.IsNaN(b) { - cmpResult = 0 - } else { - cmpResult = -1 - } - } else { - cmpResult = 1 - } - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - case types.DecimalFamily: - switch probeVec.Type().Width() { - } - } - } - case types.TimestampTZFamily: - switch probeVec.Type().Width() { - case -1: - default: - switch probeVec.CanonicalTypeFamily() { - case types.TimestampTZFamily: - switch probeVec.Type().Width() { - case -1: - default: - probeKeys := probeVec.Timestamp() - buildKeys := buildVec.Timestamp() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - if probeVal.Before(buildVal) { - cmpResult = -1 - } else if buildVal.Before(probeVal) { - cmpResult = 1 - } else { - cmpResult = 0 - } - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } - } - } - } - case types.IntervalFamily: - switch probeVec.Type().Width() { - case -1: - default: - switch probeVec.CanonicalTypeFamily() { - case types.IntervalFamily: - switch probeVec.Type().Width() { - case -1: - default: - probeKeys := probeVec.Interval() - buildKeys := buildVec.Interval() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool + { + var cmpResult int { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 + a, b := float64(probeVal), float64(buildVal) + if a < b { + cmpResult = -1 + } else if a > b { + cmpResult = 1 + } else if a == b { + cmpResult = 0 + } else if math.IsNaN(a) { + if math.IsNaN(b) { + cmpResult = 0 + } else { + cmpResult = -1 + } + } else { + cmpResult = 1 + } } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int - cmpResult = probeVal.Compare(buildVal) - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } } + case types.DecimalFamily: + switch probeVec.Type().Width() { + } } } - case types.JsonFamily: + case types.TimestampTZFamily: switch probeVec.Type().Width() { case -1: default: switch probeVec.CanonicalTypeFamily() { - case types.JsonFamily: + case types.TimestampTZFamily: switch probeVec.Type().Width() { case -1: default: - probeKeys := probeVec.JSON() - buildKeys := buildVec.JSON() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. 
- keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } - - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } - - unique = cmpResult != 0 - } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { + probeKeys := probeVec.Timestamp() + buildKeys := buildVec.Timestamp() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). 
We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true - } - } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - unique = cmpResult != 0 - } + { + var cmpResult int - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique - } - } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } - } - } - } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. 
- ht.ProbeScratch.differs[toCheck] = true - continue + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. 
This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
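For readers skimming the generated Timestamp branches above: the inlined comparison builds a three-way result out of `time.Time.Before` alone. A minimal sketch of that pattern, with a hypothetical helper name (`compareTimes`) standing in for the inlined form the generator emits:

```
package main

import (
	"fmt"
	"time"
)

// compareTimes returns -1, 0, or 1 using only time.Time.Before, mirroring the
// shape of the inlined cmpResult computation in the Timestamp cases above.
func compareTimes(a, b time.Time) int {
	if a.Before(b) {
		return -1
	} else if b.Before(a) {
		return 1
	}
	return 0
}

func main() {
	t0 := time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC)
	t1 := t0.Add(time.Hour)
	fmt.Println(compareTimes(t0, t1), compareTimes(t1, t0), compareTimes(t0, t0)) // -1 1 0
}
```

The same shape recurs in every Timestamp branch; the Interval and JSON branches below instead call their types' own `Compare` methods, with JSON additionally propagating a comparison error through `colexecerror.ExpectedError`.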
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - var err error - cmpResult, err = probeVal.Compare(buildVal) - if err != nil { - colexecerror.ExpectedError(err) - } + { + var cmpResult int - unique = cmpResult != 0 + if probeVal.Before(buildVal) { + cmpResult = -1 + } else if buildVal.Before(probeVal) { + cmpResult = 1 + } else { + cmpResult = 0 } - - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } } } } - case typeconv.DatumVecCanonicalTypeFamily: + case types.IntervalFamily: switch probeVec.Type().Width() { case -1: default: switch probeVec.CanonicalTypeFamily() { - case typeconv.DatumVecCanonicalTypeFamily: + case types.IntervalFamily: switch probeVec.Type().Width() { case -1: default: - probeKeys := probeVec.Datum() - buildKeys := buildVec.Datum() - if probeSel != nil { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - - unique = cmpResult != 0 - } + probeKeys := probeVec.Interval() + buildKeys := buildVec.Interval() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int - - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - - unique = cmpResult != 0 - } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. 
- - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + cmpResult = probeVal.Compare(buildVal) + unique = cmpResult != 0 } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - { - var cmpResult int + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + } + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } + } + } + } + case types.JsonFamily: + switch probeVec.Type().Width() { + case -1: + default: + switch probeVec.CanonicalTypeFamily() { + case types.JsonFamily: + switch probeVec.Type().Width() { + case -1: + default: + probeKeys := probeVec.JSON() + buildKeys := buildVec.JSON() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = probeSel[toCheck] - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } else { - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - probeIsNull = probeVec.Nulls().NullAt(probeIdx) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. 
+ + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 + var err error + cmpResult, err = probeVal.Compare(buildVal) + if err != nil { + colexecerror.ExpectedError(err) } - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } + } + } + } + } + case typeconv.DatumVecCanonicalTypeFamily: + switch probeVec.Type().Width() { + case -1: + default: + switch probeVec.CanonicalTypeFamily() { + case typeconv.DatumVecCanonicalTypeFamily: + switch probeVec.Type().Width() { + case -1: + default: + probeKeys := probeVec.Datum() + buildKeys := buildVec.Datum() + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } else { - if buildVec.MaybeHasNulls() { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - buildIsNull = buildVec.Nulls().NullAt(buildIdx) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + probeIsNull = probeVec.Nulls().NullAt(probeIdx) + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { - ht.ProbeScratch.distinct[toCheck] = true - } + } else { + ht.ProbeScratch.distinct[toCheck] = true } - } else { - var ( - probeIdx, buildIdx int - probeIsNull, buildIsNull bool - ) - for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { - // keyID of 0 is reserved to represent the end of the next chain. - keyID := ht.ProbeScratch.GroupID[toCheck] - if keyID != 0 { - // the build table key (calculated using keys[keyID - 1] = key) is - // compared to the corresponding probe table to determine if a match is - // found. - - probeIdx = int(toCheck) - buildIdx = int(keyID - 1) - if ht.allowNullEquality { - if probeIsNull && buildIsNull { - // Both values are NULLs, and since we're allowing null equality, we - // proceed to the next value to check. - continue - } else if probeIsNull { - // Only probing value is NULL, so it is different from the build value - // (which is non-NULL). We mark it as "different" and proceed to the - // next value to check. This behavior is special in case of allowing - // null equality because we don't want to reset the GroupID of the - // current probing tuple. - ht.ProbeScratch.differs[toCheck] = true - continue - } - } - if probeIsNull { - ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 - } else if buildIsNull { - ht.ProbeScratch.differs[toCheck] = true - } else { - probeVal := probeKeys.Get(probeIdx) - buildVal := buildKeys.Get(buildIdx) - var unique bool - - { - var cmpResult int + } + } + } else { + if buildVec.MaybeHasNulls() { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + buildIsNull = buildVec.Nulls().NullAt(buildIdx) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. 
+ ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { + ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool - cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + { + var cmpResult int - unique = cmpResult != 0 - } + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) - ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique + unique = cmpResult != 0 } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - if keyID == 0 { + } else { + ht.ProbeScratch.distinct[toCheck] = true + } + } + } else { + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) + for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { + // keyID of 0 is reserved to represent the end of the next chain. + keyID := ht.ProbeScratch.GroupID[toCheck] + if keyID != 0 { + // the build table key (calculated using keys[keyID - 1] = key) is + // compared to the corresponding probe table to determine if a match is + // found. + + probeIdx = probeSel[toCheck] + buildIdx = int(keyID - 1) + if ht.allowNullEquality { + if probeIsNull && buildIsNull { + // Both values are NULLs, and since we're allowing null equality, we + // proceed to the next value to check. + continue + } else if probeIsNull { + // Only probing value is NULL, so it is different from the build value + // (which is non-NULL). We mark it as "different" and proceed to the + // next value to check. This behavior is special in case of allowing + // null equality because we don't want to reset the GroupID of the + // current probing tuple. + ht.ProbeScratch.differs[toCheck] = true + continue + } + } + if probeIsNull { ht.ProbeScratch.distinct[toCheck] = true + } else if buildIsNull { + ht.ProbeScratch.differs[toCheck] = true + } else { + probeVal := probeKeys.Get(probeIdx) + buildVal := buildKeys.Get(buildIdx) + var unique bool + + { + var cmpResult int + + cmpResult = coldataext.CompareDatum(probeVal, probeKeys, buildVal) + + unique = cmpResult != 0 + } + + ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } + } else { + ht.ProbeScratch.distinct[toCheck] = true } } } - } } } @@ -11203,7 +8066,7 @@ func (ht *HashTable) checkColForDistinctTuples( // in the probe table. func (ht *HashTable) CheckProbeForDistinct(vecs []coldata.Vec, nToCheck uint64, sel []int) uint64 { for i := range ht.keyCols { - ht.checkColAgainstItself(vecs[i], nToCheck, sel) + ht.checkColAgainstItselfForDistinct(vecs[i], nToCheck, sel) } nDiffers := uint64(0) toCheckSlice := ht.ProbeScratch.ToCheck @@ -11212,7 +8075,7 @@ func (ht *HashTable) CheckProbeForDistinct(vecs []coldata.Vec, nToCheck uint64, //gcassert:bce toCheck := toCheckSlice[toCheckPos] if ht.ProbeScratch.distinct[toCheck] { - ht.ProbeScratch.HeadID[toCheck] = ht.ProbeScratch.GroupID[toCheck] + ht.ProbeScratch.HeadID[toCheck] = toCheck + 1 continue } if !ht.ProbeScratch.differs[toCheck] { diff --git a/pkg/sql/colexec/colexechash/hashtable_tmpl.go b/pkg/sql/colexec/colexechash/hashtable_tmpl.go index 15feb875b687..96a62c8da0ab 100644 --- a/pkg/sql/colexec/colexechash/hashtable_tmpl.go +++ b/pkg/sql/colexec/colexechash/hashtable_tmpl.go @@ -63,13 +63,22 @@ func _ASSIGN_NE(_, _, _, _, _, _ interface{}) int { // This is a code snippet that is the main body of checkCol* functions. 
It // takes in the following template "meta" variables that enable/disable certain // code paths: +// _GLOBAL - a string replaced by "$global" (the local template variable +// referring to the NotEqual overload) before performing the template function +// call. It is needed because _CHECK_COL_BODY template function is called from +// two places where the overload is in different template context (in one case +// it is `.`, and in another it is `.Global`). We work around it by replacing +// _GLOBAL with the local variable during the initial preprocessing of the +// template. // _PROBE_HAS_NULLS - a boolean as .ProbeHasNulls that determines whether the // probe vector might have NULL values. // _BUILD_HAS_NULLS - a boolean as .BuildHasNulls that determines whether the // build vector might have NULL values. // _SELECT_DISTINCT - a boolean as .SelectDistinct that determines whether a -// probe tuple should be marked as "distinct" if its GroupID is zero (meaning -// that there is no tuple in the hash table with the same hash code). +// probe tuple should be marked as "distinct" if there is no tuple in the hash +// table that might be a duplicate of the probe tuple (either because the +// GroupID of the probe tuple is 0 - meaning no hash matches - or because the +// probe tuple has a NULL value when NULLs are treated as not equal). // _USE_PROBE_SEL - a boolean as .UseProbeSel that determines whether there is // a selection vector on the probe vector. // _PROBING_AGAINST_ITSELF - a boolean as .ProbingAgainstItself that tells us @@ -82,6 +91,7 @@ func _ASSIGN_NE(_, _, _, _, _, _ interface{}) int { // When it is true, the HashTable uses 'visited' slice to mark previously // matched tuples as "deleted" so they won't get matched again. func _CHECK_COL_BODY( + _GLOBAL interface{}, _PROBE_HAS_NULLS bool, _BUILD_HAS_NULLS bool, _SELECT_DISTINCT bool, @@ -97,7 +107,19 @@ func _CHECK_COL_BODY( for _, toCheck := range ht.ProbeScratch.ToCheck[:nToCheck] { // keyID of 0 is reserved to represent the end of the next chain. keyID := ht.ProbeScratch.GroupID[toCheck] + // {{if or (not .SelectDistinct) (not .ProbingAgainstItself)}} + // {{/* + // When we're selecting distinct tuples and probing against itself, + // we're in the code path of the unordered distinct where we're + // trying to find duplicates within a single input batch. In such a + // case we will never hit keyID of 0 because each tuple in the + // batch is equal to itself (and possibly others). Once we find a + // match, the tuple is no longer checked, so we never reach the end + // of the corresponding hash chain which could result in keyID + // being 0. + // */}} if keyID != 0 { + // {{end}} // the build table key (calculated using keys[keyID - 1] = key) is // compared to the corresponding probe table to determine if a match is // found. @@ -153,20 +175,14 @@ func _CHECK_COL_BODY( } } if probeIsNull { - // {{if or (.SelectDistinct) (.ProbingAgainstItself)}} + // {{if .SelectDistinct}} // {{/* // We know that nulls are distinct (because // allowNullEquality case is handled above) and our probing // tuple has a NULL value in the current column, so the - // probing tuple is distinct from the build table. Both - // parts of the template condition above are only 'true' if - // the hash table is used for the unordered distinct - // operator, and in that scenario we want to mark the - // current probing tuple as distinct but also set its - // GroupID such that it (the probing tuple) matches itself. + // probing tuple is distinct from the build table. 
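The template comment above describes what `_CHECK_COL_BODY` expands to; those expansions fill most of the generated hunks earlier in this diff. A minimal, self-contained sketch of the per-tuple decision logic, assuming simplified stand-in types: `probeScratch` and `checkOneCol` are hypothetical names, a plain `int64` column stands in for the templated key types, and selection vectors are ignored (`probeIdx` is taken to be `toCheck` directly).

```
package main

import "fmt"

type probeScratch struct {
	GroupID  []uint64 // GroupID[i] == 0 means "no candidate match in the hash table".
	ToCheck  []uint64 // indices of probe tuples that still need verification.
	differs  []bool   // set when a probe tuple differs from its candidate match.
	distinct []bool   // set when a probe tuple is known to be distinct.
}

// checkOneCol verifies candidate matches for a single int64 key column.
// probeNull/buildNull model NULL bitmaps; allowNullEquality and selectDistinct
// play the role of the template meta variables of the same names.
func checkOneCol(
	ps *probeScratch,
	probeKeys, buildKeys []int64,
	probeNull, buildNull []bool,
	allowNullEquality, selectDistinct bool,
) {
	for _, toCheck := range ps.ToCheck {
		keyID := ps.GroupID[toCheck]
		if keyID == 0 {
			// No hash match at all; under SELECT_DISTINCT the tuple is distinct.
			if selectDistinct {
				ps.distinct[toCheck] = true
			}
			continue
		}
		// No selection vector in this sketch, so the probe index is toCheck itself.
		probeIdx, buildIdx := int(toCheck), int(keyID-1)
		pNull, bNull := probeNull[probeIdx], buildNull[buildIdx]
		if allowNullEquality {
			if pNull && bNull {
				continue // NULL == NULL under this mode; other columns may still differ.
			}
			if pNull {
				ps.differs[toCheck] = true // NULL vs non-NULL: not a match.
				continue
			}
		}
		switch {
		case pNull:
			if selectDistinct {
				ps.distinct[toCheck] = true // NULLs never match, so the tuple is distinct.
			} else {
				ps.GroupID[toCheck] = 0 // give up on this candidate chain.
			}
		case bNull:
			ps.differs[toCheck] = true
		default:
			ps.differs[toCheck] = ps.differs[toCheck] || probeKeys[probeIdx] != buildKeys[buildIdx]
		}
	}
}

func main() {
	ps := &probeScratch{
		GroupID:  []uint64{1, 0, 2},
		ToCheck:  []uint64{0, 1, 2},
		differs:  make([]bool, 3),
		distinct: make([]bool, 3),
	}
	checkOneCol(ps,
		[]int64{10, 20, 30}, []int64{10, 31},
		[]bool{false, false, false}, []bool{false, false},
		false /* allowNullEquality */, true /* selectDistinct */)
	fmt.Println(ps.differs, ps.distinct) // [false false true] [false true false]
}
```

Running it marks tuple 1 as distinct (its `GroupID` is 0, so there is no hash chain candidate) and tuple 2 as differing from its candidate, which is the role `differs` and `distinct` play for the follow-up `Check`/`CheckProbeForDistinct` passes.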
// */}} ht.ProbeScratch.distinct[toCheck] = true - ht.ProbeScratch.GroupID[toCheck] = toCheck + 1 // {{else}} ht.ProbeScratch.GroupID[toCheck] = 0 // {{end}} @@ -179,10 +195,11 @@ func _CHECK_COL_BODY( _ASSIGN_NE(unique, probeVal, buildVal, _, probeKeys, buildKeys) ht.ProbeScratch.differs[toCheck] = ht.ProbeScratch.differs[toCheck] || unique } - } - // {{if .SelectDistinct}} - if keyID == 0 { + // {{if and .SelectDistinct (not .ProbingAgainstItself)}} + } else { ht.ProbeScratch.distinct[toCheck] = true + // {{end}} + // {{if or (not .SelectDistinct) (not .ProbingAgainstItself)}} } // {{end}} } @@ -191,30 +208,38 @@ func _CHECK_COL_BODY( } func _CHECK_COL_WITH_NULLS( - _USE_PROBE_SEL bool, _PROBING_AGAINST_ITSELF bool, _DELETING_PROBE_MODE bool, + _SELECT_DISTINCT bool, + _USE_PROBE_SEL bool, + _PROBING_AGAINST_ITSELF bool, + _DELETING_PROBE_MODE bool, ) { // */}} // {{define "checkColWithNulls" -}} + // {{$global := .Global}} + // {{$selectDistinct := .SelectDistinct}} // {{$probingAgainstItself := .ProbingAgainstItself}} // {{$deletingProbeMode := .DeletingProbeMode}} if probeVec.MaybeHasNulls() { if buildVec.MaybeHasNulls() { - _CHECK_COL_BODY(true, true, false, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_BODY(_GLOBAL, true, true, _SELECT_DISTINCT, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } else { - _CHECK_COL_BODY(true, false, false, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_BODY(_GLOBAL, true, false, _SELECT_DISTINCT, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } } else { if buildVec.MaybeHasNulls() { - _CHECK_COL_BODY(false, true, false, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_BODY(_GLOBAL, false, true, _SELECT_DISTINCT, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } else { - _CHECK_COL_BODY(false, false, false, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_BODY(_GLOBAL, false, false, _SELECT_DISTINCT, _USE_PROBE_SEL, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } } // {{end}} // {{/* } -func _CHECK_COL_FUNCTION_TEMPLATE(_PROBING_AGAINST_ITSELF bool, _DELETING_PROBE_MODE bool) { // */}} +func _CHECK_COL_FUNCTION_TEMPLATE( + _SELECT_DISTINCT bool, _PROBING_AGAINST_ITSELF bool, _DELETING_PROBE_MODE bool, +) { // */}} // {{define "checkColFunctionTemplate" -}} + // {{$selectDistinct := .SelectDistinct}} // {{$probingAgainstItself := .ProbingAgainstItself}} // {{$deletingProbeMode := .DeletingProbeMode}} // {{with .Global}} @@ -252,9 +277,9 @@ func _CHECK_COL_FUNCTION_TEMPLATE(_PROBING_AGAINST_ITSELF bool, _DELETING_PROBE_ probeKeys := probeVec._ProbeType() buildKeys := buildVec._BuildType() if probeSel != nil { - _CHECK_COL_WITH_NULLS(true, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_WITH_NULLS(_SELECT_DISTINCT, true, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } else { - _CHECK_COL_WITH_NULLS(false, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) + _CHECK_COL_WITH_NULLS(_SELECT_DISTINCT, false, _PROBING_AGAINST_ITSELF, _DELETING_PROBE_MODE) } // {{end}} // {{end}} @@ -282,7 +307,7 @@ func (ht *HashTable) checkCol( probeVec, buildVec coldata.Vec, keyColIdx int, nToCheck uint64, probeSel []int, ) { // {{with .Overloads}} - _CHECK_COL_FUNCTION_TEMPLATE(false, false) + _CHECK_COL_FUNCTION_TEMPLATE(false, false, false) // {{end}} } @@ -290,16 +315,17 @@ func (ht *HashTable) checkCol( // {{if .HashTableMode.IsDistinctBuild}} -// checkColAgainstItself is similar to checkCol, but it 
probes the vector -// against itself. -func (ht *HashTable) checkColAgainstItself(vec coldata.Vec, nToCheck uint64, sel []int) { +// checkColAgainstItselfForDistinct is similar to checkCol, but it probes the +// vector against itself for the purposes of finding matches to unordered +// distinct columns. +func (ht *HashTable) checkColAgainstItselfForDistinct(vec coldata.Vec, nToCheck uint64, sel []int) { // {{/* // In order to reuse the same template function as checkCol uses, we use // the same variable names. // */}} probeVec, buildVec, probeSel := vec, vec, sel // {{with .Overloads}} - _CHECK_COL_FUNCTION_TEMPLATE(true, false) + _CHECK_COL_FUNCTION_TEMPLATE(true, true, false) // {{end}} } @@ -316,33 +342,12 @@ func (ht *HashTable) checkColDeleting( probeVec, buildVec coldata.Vec, keyColIdx int, nToCheck uint64, probeSel []int, ) { // {{with .Overloads}} - _CHECK_COL_FUNCTION_TEMPLATE(false, true) + _CHECK_COL_FUNCTION_TEMPLATE(false, false, true) // {{end}} } // {{end}} -// {{/* -func _CHECK_COL_FOR_DISTINCT_WITH_NULLS(_USE_PROBE_SEL bool) { // */}} - // {{define "checkColForDistinctWithNulls" -}} - if probeVec.MaybeHasNulls() { - if buildVec.MaybeHasNulls() { - _CHECK_COL_BODY(true, true, true, _USE_PROBE_SEL, false, false) - } else { - _CHECK_COL_BODY(true, false, true, _USE_PROBE_SEL, false, false) - } - } else { - if buildVec.MaybeHasNulls() { - _CHECK_COL_BODY(false, true, true, _USE_PROBE_SEL, false, false) - } else { - _CHECK_COL_BODY(false, false, true, _USE_PROBE_SEL, false, false) - } - } - - // {{end}} - // {{/* -} // */}} - // {{if .HashTableMode.IsDistinctBuild}} // {{with .Overloads}} @@ -376,10 +381,19 @@ func (ht *HashTable) checkColForDistinctTuples( case _RIGHT_TYPE_WIDTH: probeKeys := probeVec._ProbeType() buildKeys := buildVec._ProbeType() - if probeSel != nil { - _CHECK_COL_FOR_DISTINCT_WITH_NULLS(true) + // {{$global := .}} + if probeVec.MaybeHasNulls() { + if buildVec.MaybeHasNulls() { + _CHECK_COL_BODY(_GLOBAL, true, true, true, true, false, false) + } else { + _CHECK_COL_BODY(_GLOBAL, true, false, true, true, false, false) + } } else { - _CHECK_COL_FOR_DISTINCT_WITH_NULLS(false) + if buildVec.MaybeHasNulls() { + _CHECK_COL_BODY(_GLOBAL, false, true, true, true, false, false) + } else { + _CHECK_COL_BODY(_GLOBAL, false, false, true, true, false, false) + } } // {{end}} // {{end}} @@ -405,7 +419,16 @@ func _CHECK_BODY(_SELECT_SAME_TUPLES bool, _DELETING_PROBE_MODE bool, _SELECT_DI toCheck := toCheckSlice[toCheckPos] // {{if .SelectDistinct}} if ht.ProbeScratch.distinct[toCheck] { - ht.ProbeScratch.HeadID[toCheck] = ht.ProbeScratch.GroupID[toCheck] + // {{/* + // The hash table is used for the unordered distinct operator. + // This code block is only relevant when we're probing the batch + // against itself in order to separate all tuples in the batch + // into equality buckets (where equality buckets are specified + // by the same HeadID values). In this case we see that the + // probing tuple is distinct (i.e. it is unique in the batch), + // so we want to mark it as equal to itself only. + // */}} + ht.ProbeScratch.HeadID[toCheck] = toCheck + 1 continue } // {{end}} @@ -514,7 +537,7 @@ func (ht *HashTable) Check(probeVecs []coldata.Vec, nToCheck uint64, probeSel [] // in the probe table. 
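The new comment in `_CHECK_BODY` says a distinct probing tuple should be marked as equal to itself only, hence `HeadID[toCheck] = toCheck + 1`. A small sketch of how those 1-based `HeadID` values can be read back as equality buckets; `bucketsFromHeadID` is a hypothetical helper for illustration, not part of the hash table:

```
package main

import "fmt"

// bucketsFromHeadID groups row indices by their 1-based HeadID. A distinct
// (unique-in-batch) row gets HeadID = rowIdx + 1, i.e. it heads its own bucket.
func bucketsFromHeadID(headID []uint64) map[uint64][]int {
	buckets := make(map[uint64][]int)
	for row, h := range headID {
		buckets[h] = append(buckets[h], row)
	}
	return buckets
}

func main() {
	// Rows 0 and 2 are duplicates of each other (bucket headed by row 0);
	// row 1 is unique, so it points at itself (HeadID = 1+1 = 2).
	headID := []uint64{1, 2, 1}
	fmt.Println(bucketsFromHeadID(headID)) // map[1:[0 2] 2:[1]]
}
```

Here row 1 is unique in the batch, so its bucket contains only itself, while rows 0 and 2 share the bucket headed by row 0.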
func (ht *HashTable) CheckProbeForDistinct(vecs []coldata.Vec, nToCheck uint64, sel []int) uint64 { for i := range ht.keyCols { - ht.checkColAgainstItself(vecs[i], nToCheck, sel) + ht.checkColAgainstItselfForDistinct(vecs[i], nToCheck, sel) } nDiffers := uint64(0) _CHECK_BODY(false, false, true) diff --git a/pkg/sql/colexec/colexecjoin/BUILD.bazel b/pkg/sql/colexec/colexecjoin/BUILD.bazel index 59f4c062885c..8167e8c76958 100644 --- a/pkg/sql/colexec/colexecjoin/BUILD.bazel +++ b/pkg/sql/colexec/colexecjoin/BUILD.bazel @@ -33,7 +33,7 @@ go_library( "//pkg/util/duration", # keep "//pkg/util/json", # keep "//pkg/util/mon", - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", "@com_github_marusama_semaphore//:semaphore", ], @@ -47,6 +47,7 @@ go_test( "mergejoiner_test.go", ], embed = [":colexecjoin"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go index 86e91620a528..6623b4e55d34 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_exceptall.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go index 0d8d1fde22d9..996be9989579 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_fullouter.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go index fdd9d665d452..17ccdaf40312 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_inner.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go index 55c13e7a4139..7aee581338ad 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_intersectall.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go index dbc70f4fa95c..3e18b7bee080 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftanti.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - 
"github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go index f8d6f065d6bd..9df2b39bbbb2 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftouter.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go index ddab25e50f6c..c6ea41924f21 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_leftsemi.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go index d782f4429bf2..a6d1d9dca1b7 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightanti.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go index bd6ae5db6595..582f0d59a60e 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightouter.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go index d2cd18ca9289..a5d983bc8f91 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_rightsemi.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go index ff29ce6837f9..b145f9d805bf 100644 --- a/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go +++ b/pkg/sql/colexec/colexecjoin/mergejoiner_tmpl.go @@ -22,7 +22,7 @@ package colexecjoin import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git 
a/pkg/sql/colexec/colexecproj/BUILD.bazel b/pkg/sql/colexec/colexecproj/BUILD.bazel index 24eef0f6185f..89f64d7c8482 100644 --- a/pkg/sql/colexec/colexecproj/BUILD.bazel +++ b/pkg/sql/colexec/colexecproj/BUILD.bazel @@ -27,7 +27,7 @@ go_library( "//pkg/sql/types", "//pkg/util/duration", # keep "//pkg/util/json", # keep - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", ], ) @@ -42,6 +42,7 @@ go_test( "projection_ops_test.go", ], embed = [":colexecproj"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", @@ -56,7 +57,6 @@ go_test( "//pkg/sql/colexecop", "//pkg/sql/colmem", "//pkg/sql/execinfra", - "//pkg/sql/rowenc", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/testutils/buildutil", diff --git a/pkg/sql/colexec/colexecproj/proj_const_left_ops.eg.go b/pkg/sql/colexec/colexecproj/proj_const_left_ops.eg.go index 3970a3d8e2ab..b3e122caaf83 100644 --- a/pkg/sql/colexec/colexecproj/proj_const_left_ops.eg.go +++ b/pkg/sql/colexec/colexecproj/proj_const_left_ops.eg.go @@ -14,7 +14,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -50,12 +50,6 @@ type projBitandInt16ConstInt16Op struct { } func (p projBitandInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -112,7 +106,7 @@ func (p projBitandInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -152,12 +146,6 @@ type projBitandInt16ConstInt32Op struct { } func (p projBitandInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -214,7 +202,7 @@ func (p projBitandInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -254,12 +242,6 @@ type projBitandInt16ConstInt64Op struct { } func (p projBitandInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -316,7 +298,7 @@ func (p projBitandInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -356,12 +338,6 @@ type projBitandInt32ConstInt16Op struct { } func (p projBitandInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -418,7 +394,7 @@ func (p projBitandInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -458,12 +434,6 @@ type projBitandInt32ConstInt32Op struct { } func (p projBitandInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -520,7 +490,7 @@ func (p projBitandInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -560,12 +530,6 @@ type projBitandInt32ConstInt64Op struct { } func (p projBitandInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -622,7 +586,7 @@ func (p projBitandInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
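Aside, not part of the patch: the `SetNulls(_outNulls.Or(*colNulls))` hunks repeated throughout implement the comment that precedes them: when the input vector may contain NULLs, the output NULL set is the union of the NULLs produced by the operation itself and the input NULLs, and the `*colNulls` dereference simply passes the input `Nulls` set to `Or` by value. The sketch below models that union with a plain bool slice; it deliberately does not use the real `coldata.Nulls` bitmap API.

```go
package main

import "fmt"

// unionNulls is a simplified model of the null-handling comment in the hunks
// above: a result position is NULL if either the operation produced a NULL
// there or the input was NULL there. A bool slice stands in for coldata's
// Nulls bitmap.
func unionNulls(outNulls, colNulls []bool) []bool {
	res := make([]bool, len(outNulls))
	for i := range outNulls {
		res[i] = outNulls[i] || colNulls[i]
	}
	return res
}

func main() {
	fmt.Println(unionNulls([]bool{false, true, false}, []bool{true, false, false}))
	// [true true false]
}
```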
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -662,12 +626,6 @@ type projBitandInt64ConstInt16Op struct { } func (p projBitandInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -724,7 +682,7 @@ func (p projBitandInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -764,12 +722,6 @@ type projBitandInt64ConstInt32Op struct { } func (p projBitandInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -826,7 +778,7 @@ func (p projBitandInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -866,12 +818,6 @@ type projBitandInt64ConstInt64Op struct { } func (p projBitandInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -928,7 +874,7 @@ func (p projBitandInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -964,16 +910,15 @@ func (p projBitandInt64ConstInt64Op) Next() coldata.Batch { type projBitandDatumConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitandDatumConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1043,7 +988,7 @@ func (p projBitandDatumConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1096,12 +1041,6 @@ type projBitorInt16ConstInt16Op struct { } func (p projBitorInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1158,7 +1097,7 @@ func (p projBitorInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1198,12 +1137,6 @@ type projBitorInt16ConstInt32Op struct { } func (p projBitorInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1260,7 +1193,7 @@ func (p projBitorInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1300,12 +1233,6 @@ type projBitorInt16ConstInt64Op struct { } func (p projBitorInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1362,7 +1289,7 @@ func (p projBitorInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
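Aside, not part of the patch: the datum-on-datum operators such as `projBitandDatumConstDatumOp` now embed `execgen.BinaryOverloadHelper` directly, and `Next` copies it into the `_overloadHelper` local that the templated body expects, while the non-datum operators drop the helper boilerplate entirely. The sketch below shows the embedding-plus-local-alias pattern with a hypothetical stand-in type, since the fields of the real helper are not shown in these hunks.

```go
package main

import "fmt"

// binaryOverloadHelper is a hypothetical stand-in for execgen.BinaryOverloadHelper;
// only the shape of the pattern is modeled here.
type binaryOverloadHelper struct {
	name string
}

type projConstOpBase struct {
	// input operator, column indexes, etc. elided
}

// projDatumConstDatumOp embeds the helper, so only operators that actually
// need it carry the field, and the embedded value is reachable by its type name.
type projDatumConstDatumOp struct {
	projConstOpBase
	binaryOverloadHelper
	constArg interface{}
}

func (p projDatumConstDatumOp) Next() {
	// The templated body refers to a local called _overloadHelper, so the
	// method begins by copying the embedded helper into that name.
	_overloadHelper := p.binaryOverloadHelper
	fmt.Println("using helper:", _overloadHelper.name)
	// ... projection work elided ...
}

func main() {
	op := projDatumConstDatumOp{binaryOverloadHelper: binaryOverloadHelper{name: "demo"}}
	op.Next()
}
```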
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1402,12 +1329,6 @@ type projBitorInt32ConstInt16Op struct { } func (p projBitorInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1464,7 +1385,7 @@ func (p projBitorInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1504,12 +1425,6 @@ type projBitorInt32ConstInt32Op struct { } func (p projBitorInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1566,7 +1481,7 @@ func (p projBitorInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1606,12 +1521,6 @@ type projBitorInt32ConstInt64Op struct { } func (p projBitorInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1668,7 +1577,7 @@ func (p projBitorInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1708,12 +1617,6 @@ type projBitorInt64ConstInt16Op struct { } func (p projBitorInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1770,7 +1673,7 @@ func (p projBitorInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1810,12 +1713,6 @@ type projBitorInt64ConstInt32Op struct { } func (p projBitorInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1872,7 +1769,7 @@ func (p projBitorInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1912,12 +1809,6 @@ type projBitorInt64ConstInt64Op struct { } func (p projBitorInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1974,7 +1865,7 @@ func (p projBitorInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2010,16 +1901,15 @@ func (p projBitorInt64ConstInt64Op) Next() coldata.Batch { type projBitorDatumConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitorDatumConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2089,7 +1979,7 @@ func (p projBitorDatumConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2142,12 +2032,6 @@ type projBitxorInt16ConstInt16Op struct { } func (p projBitxorInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2204,7 +2088,7 @@ func (p projBitxorInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2244,12 +2128,6 @@ type projBitxorInt16ConstInt32Op struct { } func (p projBitxorInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2306,7 +2184,7 @@ func (p projBitxorInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2346,12 +2224,6 @@ type projBitxorInt16ConstInt64Op struct { } func (p projBitxorInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2408,7 +2280,7 @@ func (p projBitxorInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2448,12 +2320,6 @@ type projBitxorInt32ConstInt16Op struct { } func (p projBitxorInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2510,7 +2376,7 @@ func (p projBitxorInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2550,12 +2416,6 @@ type projBitxorInt32ConstInt32Op struct { } func (p projBitxorInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2612,7 +2472,7 @@ func (p projBitxorInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2652,12 +2512,6 @@ type projBitxorInt32ConstInt64Op struct { } func (p projBitxorInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2714,7 +2568,7 @@ func (p projBitxorInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2754,12 +2608,6 @@ type projBitxorInt64ConstInt16Op struct { } func (p projBitxorInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2816,7 +2664,7 @@ func (p projBitxorInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2856,12 +2704,6 @@ type projBitxorInt64ConstInt32Op struct { } func (p projBitxorInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2918,7 +2760,7 @@ func (p projBitxorInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2958,12 +2800,6 @@ type projBitxorInt64ConstInt64Op struct { } func (p projBitxorInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3020,7 +2856,7 @@ func (p projBitxorInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3056,16 +2892,15 @@ func (p projBitxorInt64ConstInt64Op) Next() coldata.Batch { type projBitxorDatumConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitxorDatumConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3135,7 +2970,7 @@ func (p projBitxorDatumConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3188,12 +3023,6 @@ type projPlusDecimalConstInt16Op struct { } func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3229,9 +3058,9 @@ func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3249,9 +3078,9 @@ func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3264,7 +3093,7 @@ func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3273,9 +3102,9 @@ func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3290,9 +3119,9 @@ func (p projPlusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3318,12 +3147,6 @@ type projPlusDecimalConstInt32Op struct { } func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
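Aside, not part of the patch: the `projPlus*Decimal*` hunks above replace the shared scratch decimal `_overloadHelper.TmpDec1` with a stack-local `var tmpDec apd.Decimal`, annotated `//gcassert:noescape` so the gcassert linter can verify the local does not escape to the heap. The sketch below shows the same local-temporary pattern against the apd/v3 API; the context and helper name are assumptions for illustration (the real code uses `tree.ExactCtx`), and the gcassert annotation is omitted because it only has meaning under that linter.

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// ctx is a stand-in for tree.ExactCtx (an assumption); a precision must be set
// before arithmetic, per the apd documentation.
var ctx = apd.BaseContext.WithPrecision(25)

// addInt64ToDecimal is an illustrative helper (not from the patch): it widens
// an int64 into a stack-local apd.Decimal and adds it to dec, writing into res,
// mirroring the tmpDec pattern in the generated operators.
func addInt64ToDecimal(res, dec *apd.Decimal, x int64) error {
	var tmpDec apd.Decimal
	tmpDec.SetInt64(x)
	_, err := ctx.Add(res, dec, &tmpDec)
	return err
}

func main() {
	d, _, err := apd.NewFromString("1.5")
	if err != nil {
		panic(err)
	}
	var res apd.Decimal
	if err := addInt64ToDecimal(&res, d, 2); err != nil {
		panic(err)
	}
	fmt.Println(res.String()) // 3.5
}
```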
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3359,9 +3182,9 @@ func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3379,9 +3202,9 @@ func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3394,7 +3217,7 @@ func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3403,9 +3226,9 @@ func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3420,9 +3243,9 @@ func (p projPlusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3448,12 +3271,6 @@ type projPlusDecimalConstInt64Op struct { } func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3489,9 +3306,9 @@ func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3509,9 +3326,9 @@ func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3524,7 +3341,7 @@ func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3533,9 +3350,9 @@ func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3550,9 +3367,9 @@ func (p projPlusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3578,12 +3395,6 @@ type projPlusDecimalConstDecimalOp struct { } func (p projPlusDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3652,7 +3463,7 @@ func (p projPlusDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3704,12 +3515,6 @@ type projPlusInt16ConstInt16Op struct { } func (p projPlusInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3778,7 +3583,7 @@ func (p projPlusInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3830,12 +3635,6 @@ type projPlusInt16ConstInt32Op struct { } func (p projPlusInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3904,7 +3703,7 @@ func (p projPlusInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3956,12 +3755,6 @@ type projPlusInt16ConstInt64Op struct { } func (p projPlusInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4030,7 +3823,7 @@ func (p projPlusInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4082,12 +3875,6 @@ type projPlusInt16ConstDecimalOp struct { } func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4123,9 +3910,9 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4144,9 +3931,9 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4160,7 +3947,7 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4169,9 +3956,9 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4187,9 +3974,9 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4212,16 +3999,15 @@ func (p projPlusInt16ConstDecimalOp) Next() coldata.Batch { type projPlusInt16ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projPlusInt16ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4300,7 +4086,7 @@ func (p projPlusInt16ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4362,12 +4148,6 @@ type projPlusInt32ConstInt16Op struct { } func (p projPlusInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4436,7 +4216,7 @@ func (p projPlusInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4488,12 +4268,6 @@ type projPlusInt32ConstInt32Op struct { } func (p projPlusInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4562,7 +4336,7 @@ func (p projPlusInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4614,12 +4388,6 @@ type projPlusInt32ConstInt64Op struct { } func (p projPlusInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4688,7 +4456,7 @@ func (p projPlusInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4740,12 +4508,6 @@ type projPlusInt32ConstDecimalOp struct { } func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4781,9 +4543,9 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4802,9 +4564,9 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4818,7 +4580,7 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4827,9 +4589,9 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4845,9 +4607,9 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -4870,16 +4632,15 @@ func (p projPlusInt32ConstDecimalOp) Next() coldata.Batch { type projPlusInt32ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projPlusInt32ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4958,7 +4719,7 @@ func (p projPlusInt32ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5020,12 +4781,6 @@ type projPlusInt64ConstInt16Op struct { } func (p projPlusInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5094,7 +4849,7 @@ func (p projPlusInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5146,12 +4901,6 @@ type projPlusInt64ConstInt32Op struct { } func (p projPlusInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5220,7 +4969,7 @@ func (p projPlusInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5272,12 +5021,6 @@ type projPlusInt64ConstInt64Op struct { } func (p projPlusInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5346,7 +5089,7 @@ func (p projPlusInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5398,12 +5141,6 @@ type projPlusInt64ConstDecimalOp struct { } func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5439,9 +5176,9 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -5460,9 +5197,9 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -5476,7 +5213,7 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5485,9 +5222,9 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -5503,9 +5240,9 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -5528,16 +5265,15 @@ func (p projPlusInt64ConstDecimalOp) Next() coldata.Batch { type projPlusInt64ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projPlusInt64ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5616,7 +5352,7 @@ func (p projPlusInt64ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5678,12 +5414,6 @@ type projPlusFloat64ConstFloat64Op struct { } func (p projPlusFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5746,7 +5476,7 @@ func (p projPlusFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5792,12 +5522,6 @@ type projPlusTimestampConstIntervalOp struct { } func (p projPlusTimestampConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5860,7 +5584,7 @@ func (p projPlusTimestampConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5906,12 +5630,6 @@ type projPlusIntervalConstTimestampOp struct { } func (p projPlusIntervalConstTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5974,7 +5692,7 @@ func (p projPlusIntervalConstTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6020,12 +5738,6 @@ type projPlusIntervalConstIntervalOp struct { } func (p projPlusIntervalConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6078,7 +5790,7 @@ func (p projPlusIntervalConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6110,16 +5822,15 @@ func (p projPlusIntervalConstIntervalOp) Next() coldata.Batch { type projPlusIntervalConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg duration.Duration } func (p projPlusIntervalConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6198,7 +5909,7 @@ func (p projPlusIntervalConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6256,16 +5967,15 @@ func (p projPlusIntervalConstDatumOp) Next() coldata.Batch { type projPlusDatumConstIntervalOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusDatumConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6343,7 +6053,7 @@ func (p projPlusDatumConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6400,16 +6110,15 @@ func (p projPlusDatumConstIntervalOp) Next() coldata.Batch { type projPlusDatumConstInt16Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusDatumConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6487,7 +6196,7 @@ func (p projPlusDatumConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6544,16 +6253,15 @@ func (p projPlusDatumConstInt16Op) Next() coldata.Batch { type projPlusDatumConstInt32Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusDatumConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6631,7 +6339,7 @@ func (p projPlusDatumConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6688,16 +6396,15 @@ func (p projPlusDatumConstInt32Op) Next() coldata.Batch { type projPlusDatumConstInt64Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusDatumConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6775,7 +6482,7 @@ func (p projPlusDatumConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6836,12 +6543,6 @@ type projMinusDecimalConstInt16Op struct { } func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6877,9 +6578,9 @@ func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6897,9 +6598,9 @@ func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6912,7 +6613,7 @@ func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6921,9 +6622,9 @@ func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6938,9 +6639,9 @@ func (p projMinusDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6966,12 +6667,6 @@ type projMinusDecimalConstInt32Op struct { } func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7007,9 +6702,9 @@ func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7027,9 +6722,9 @@ func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7042,7 +6737,7 @@ func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7051,9 +6746,9 @@ func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7068,9 +6763,9 @@ func (p projMinusDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7096,12 +6791,6 @@ type projMinusDecimalConstInt64Op struct { } func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7137,9 +6826,9 @@ func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7157,9 +6846,9 @@ func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7172,7 +6861,7 @@ func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7181,9 +6870,9 @@ func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7198,9 +6887,9 @@ func (p projMinusDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7226,12 +6915,6 @@ type projMinusDecimalConstDecimalOp struct { } func (p projMinusDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7300,7 +6983,7 @@ func (p projMinusDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7352,12 +7035,6 @@ type projMinusInt16ConstInt16Op struct { } func (p projMinusInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7426,7 +7103,7 @@ func (p projMinusInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7478,12 +7155,6 @@ type projMinusInt16ConstInt32Op struct { } func (p projMinusInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7552,7 +7223,7 @@ func (p projMinusInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7604,12 +7275,6 @@ type projMinusInt16ConstInt64Op struct { } func (p projMinusInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7678,7 +7343,7 @@ func (p projMinusInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7730,12 +7395,6 @@ type projMinusInt16ConstDecimalOp struct { } func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7771,9 +7430,9 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -7792,9 +7451,9 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -7808,7 +7467,7 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7817,9 +7476,9 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -7835,9 +7494,9 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -7860,16 +7519,15 @@ func (p projMinusInt16ConstDecimalOp) Next() coldata.Batch { type projMinusInt16ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projMinusInt16ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7948,7 +7606,7 @@ func (p projMinusInt16ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8010,12 +7668,6 @@ type projMinusInt32ConstInt16Op struct { } func (p projMinusInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8084,7 +7736,7 @@ func (p projMinusInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8136,12 +7788,6 @@ type projMinusInt32ConstInt32Op struct { } func (p projMinusInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8210,7 +7856,7 @@ func (p projMinusInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8262,12 +7908,6 @@ type projMinusInt32ConstInt64Op struct { } func (p projMinusInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8336,7 +7976,7 @@ func (p projMinusInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8388,12 +8028,6 @@ type projMinusInt32ConstDecimalOp struct { } func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8429,9 +8063,9 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -8450,9 +8084,9 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -8466,7 +8100,7 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8475,9 +8109,9 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -8493,9 +8127,9 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -8518,16 +8152,15 @@ func (p projMinusInt32ConstDecimalOp) Next() coldata.Batch { type projMinusInt32ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projMinusInt32ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8606,7 +8239,7 @@ func (p projMinusInt32ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8668,12 +8301,6 @@ type projMinusInt64ConstInt16Op struct { } func (p projMinusInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8742,7 +8369,7 @@ func (p projMinusInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8794,12 +8421,6 @@ type projMinusInt64ConstInt32Op struct { } func (p projMinusInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8868,7 +8489,7 @@ func (p projMinusInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8920,12 +8541,6 @@ type projMinusInt64ConstInt64Op struct { } func (p projMinusInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8994,7 +8609,7 @@ func (p projMinusInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9046,12 +8661,6 @@ type projMinusInt64ConstDecimalOp struct { } func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9087,9 +8696,9 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -9108,9 +8717,9 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -9124,7 +8733,7 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9133,9 +8742,9 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -9151,9 +8760,9 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -9176,16 +8785,15 @@ func (p projMinusInt64ConstDecimalOp) Next() coldata.Batch { type projMinusInt64ConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projMinusInt64ConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9264,7 +8872,7 @@ func (p projMinusInt64ConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9326,12 +8934,6 @@ type projMinusFloat64ConstFloat64Op struct { } func (p projMinusFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9394,7 +8996,7 @@ func (p projMinusFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9440,12 +9042,6 @@ type projMinusTimestampConstTimestampOp struct { } func (p projMinusTimestampConstTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9504,7 +9100,7 @@ func (p projMinusTimestampConstTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9546,12 +9142,6 @@ type projMinusTimestampConstIntervalOp struct { } func (p projMinusTimestampConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9614,7 +9204,7 @@ func (p projMinusTimestampConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9660,12 +9250,6 @@ type projMinusIntervalConstIntervalOp struct { } func (p projMinusIntervalConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9718,7 +9302,7 @@ func (p projMinusIntervalConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9750,16 +9334,15 @@ func (p projMinusIntervalConstIntervalOp) Next() coldata.Batch { type projMinusIntervalConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg duration.Duration } func (p projMinusIntervalConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9838,7 +9421,7 @@ func (p projMinusIntervalConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9900,12 +9483,6 @@ type projMinusJSONConstBytesOp struct { } func (p projMinusJSONConstBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9975,7 +9552,7 @@ func (p projMinusJSONConstBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10028,12 +9605,6 @@ type projMinusJSONConstInt16Op struct { } func (p projMinusJSONConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10095,7 +9666,7 @@ func (p projMinusJSONConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10140,12 +9711,6 @@ type projMinusJSONConstInt32Op struct { } func (p projMinusJSONConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10207,7 +9772,7 @@ func (p projMinusJSONConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10252,12 +9817,6 @@ type projMinusJSONConstInt64Op struct { } func (p projMinusJSONConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10319,7 +9878,7 @@ func (p projMinusJSONConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10360,16 +9919,15 @@ func (p projMinusJSONConstInt64Op) Next() coldata.Batch { type projMinusDatumConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10439,7 +9997,7 @@ func (p projMinusDatumConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10488,16 +10046,15 @@ func (p projMinusDatumConstDatumOp) Next() coldata.Batch { type projMinusDatumConstIntervalOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10575,7 +10132,7 @@ func (p projMinusDatumConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10632,16 +10189,15 @@ func (p projMinusDatumConstIntervalOp) Next() coldata.Batch { type projMinusDatumConstBytesOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10719,7 +10275,7 @@ func (p projMinusDatumConstBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10776,16 +10332,15 @@ func (p projMinusDatumConstBytesOp) Next() coldata.Batch { type projMinusDatumConstInt16Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10863,7 +10418,7 @@ func (p projMinusDatumConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10920,16 +10475,15 @@ func (p projMinusDatumConstInt16Op) Next() coldata.Batch { type projMinusDatumConstInt32Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11007,7 +10561,7 @@ func (p projMinusDatumConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11064,16 +10618,15 @@ func (p projMinusDatumConstInt32Op) Next() coldata.Batch { type projMinusDatumConstInt64Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11151,7 +10704,7 @@ func (p projMinusDatumConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11212,12 +10765,6 @@ type projMultDecimalConstInt16Op struct { } func (p projMultDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11253,9 +10800,9 @@ func (p projMultDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11273,9 +10820,9 @@ func (p projMultDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11288,7 +10835,7 @@ func (p projMultDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
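The hunks above repeat one pattern for every datum-backed binary projection operator: the struct now embeds `execgen.BinaryOverloadHelper` and `Next()` copies it into a local `_overloadHelper` so the templated overload body can be inlined. The following is a minimal sketch of that pattern only, with a hypothetical operator name; the real generated operators live in `pkg/sql/colexec/colexecproj` and depend on that package's surrounding types.

```go
// Hypothetical operator illustrating the embedding pattern from the hunks above.
type projBinaryDatumConstOp struct {
	projConstOpBase
	execgen.BinaryOverloadHelper // embedded instead of a generic scratch helper
	constArg interface{}
}

func (p projBinaryDatumConstOp) Next() coldata.Batch {
	// The templated overload code references `_overloadHelper`, so the
	// generated Next() starts by copying the embedded helper into a local.
	_overloadHelper := p.BinaryOverloadHelper
	_ = _overloadHelper // the elided projection body below would use it
	batch := p.Input.Next()
	// ... per-row projection body elided ...
	return batch
}
```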
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11297,9 +10844,9 @@ func (p projMultDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11314,9 +10861,9 @@ func (p projMultDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11342,12 +10889,6 @@ type projMultDecimalConstInt32Op struct { } func (p projMultDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11383,9 +10924,9 @@ func (p projMultDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11403,9 +10944,9 @@ func (p projMultDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11418,7 +10959,7 @@ func (p projMultDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11427,9 +10968,9 @@ func (p projMultDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11444,9 +10985,9 @@ func (p projMultDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11472,12 +11013,6 @@ type projMultDecimalConstInt64Op struct { } func (p projMultDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11513,9 +11048,9 @@ func (p projMultDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11533,9 +11068,9 @@ func (p projMultDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11548,7 +11083,7 @@ func (p projMultDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
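For the non-datum operators, the repeated change is that the decimal scratch previously borrowed from the helper (`_overloadHelper.TmpDec1`) becomes a stack-local `apd.Decimal`, with the `//gcassert:noescape` directive asserting (via the gcassert linter) that the temporary does not escape to the heap. A small sketch of one such per-row body, assuming the repo's `tree.ExactCtx` (`*apd.Context`) and `colexecerror`; the wrapper function name is illustrative, not from the patch:

```go
// mulDecimalByInt mirrors the new Mult(decimal const, int col) body: a
// stack-local apd.Decimal replaces the old _overloadHelper.TmpDec1 scratch.
func mulDecimalByInt(res, constDec *apd.Decimal, arg int64) {
	var tmpDec apd.Decimal //gcassert:noescape
	tmpDec.SetInt64(arg)
	if _, err := tree.ExactCtx.Mul(res, constDec, &tmpDec); err != nil {
		colexecerror.ExpectedError(err)
	}
}
```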
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11557,9 +11092,9 @@ func (p projMultDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11574,9 +11109,9 @@ func (p projMultDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11602,12 +11137,6 @@ type projMultDecimalConstDecimalOp struct { } func (p projMultDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11676,7 +11205,7 @@ func (p projMultDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11728,12 +11257,6 @@ type projMultDecimalConstIntervalOp struct { } func (p projMultDecimalConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11796,7 +11319,7 @@ func (p projMultDecimalConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11842,12 +11365,6 @@ type projMultInt16ConstInt16Op struct { } func (p projMultInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11932,7 +11449,7 @@ func (p projMultInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12000,12 +11517,6 @@ type projMultInt16ConstInt32Op struct { } func (p projMultInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12090,7 +11601,7 @@ func (p projMultInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12158,12 +11669,6 @@ type projMultInt16ConstInt64Op struct { } func (p projMultInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12248,7 +11753,7 @@ func (p projMultInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12316,12 +11821,6 @@ type projMultInt16ConstDecimalOp struct { } func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12357,9 +11856,9 @@ func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -12378,9 +11877,9 @@ func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -12394,7 +11893,7 @@ func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12403,9 +11902,9 @@ func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -12421,9 +11920,9 @@ func (p projMultInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -12450,12 +11949,6 @@ type projMultInt16ConstIntervalOp struct { } func (p projMultInt16ConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12508,7 +12001,7 @@ func (p projMultInt16ConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12544,12 +12037,6 @@ type projMultInt32ConstInt16Op struct { } func (p projMultInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12634,7 +12121,7 @@ func (p projMultInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12702,12 +12189,6 @@ type projMultInt32ConstInt32Op struct { } func (p projMultInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12792,7 +12273,7 @@ func (p projMultInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12860,12 +12341,6 @@ type projMultInt32ConstInt64Op struct { } func (p projMultInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12950,7 +12425,7 @@ func (p projMultInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13018,12 +12493,6 @@ type projMultInt32ConstDecimalOp struct { } func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13059,9 +12528,9 @@ func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13080,9 +12549,9 @@ func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13096,7 +12565,7 @@ func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13105,9 +12574,9 @@ func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13123,9 +12592,9 @@ func (p projMultInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13152,12 +12621,6 @@ type projMultInt32ConstIntervalOp struct { } func (p projMultInt32ConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13210,7 +12673,7 @@ func (p projMultInt32ConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13246,12 +12709,6 @@ type projMultInt64ConstInt16Op struct { } func (p projMultInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13336,7 +12793,7 @@ func (p projMultInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13404,12 +12861,6 @@ type projMultInt64ConstInt32Op struct { } func (p projMultInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13494,7 +12945,7 @@ func (p projMultInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13562,12 +13013,6 @@ type projMultInt64ConstInt64Op struct { } func (p projMultInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13652,7 +13097,7 @@ func (p projMultInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13720,12 +13165,6 @@ type projMultInt64ConstDecimalOp struct { } func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13761,9 +13200,9 @@ func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13782,9 +13221,9 @@ func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13798,7 +13237,7 @@ func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13807,9 +13246,9 @@ func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13825,9 +13264,9 @@ func (p projMultInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -13854,12 +13293,6 @@ type projMultInt64ConstIntervalOp struct { } func (p projMultInt64ConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13912,7 +13345,7 @@ func (p projMultInt64ConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13948,12 +13381,6 @@ type projMultFloat64ConstFloat64Op struct { } func (p projMultFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14016,7 +13443,7 @@ func (p projMultFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14062,12 +13489,6 @@ type projMultFloat64ConstIntervalOp struct { } func (p projMultFloat64ConstIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14120,7 +13541,7 @@ func (p projMultFloat64ConstIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14156,12 +13577,6 @@ type projMultIntervalConstInt16Op struct { } func (p projMultIntervalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14214,7 +13629,7 @@ func (p projMultIntervalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14250,12 +13665,6 @@ type projMultIntervalConstInt32Op struct { } func (p projMultIntervalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14308,7 +13717,7 @@ func (p projMultIntervalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14344,12 +13753,6 @@ type projMultIntervalConstInt64Op struct { } func (p projMultIntervalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14402,7 +13805,7 @@ func (p projMultIntervalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14438,12 +13841,6 @@ type projMultIntervalConstFloat64Op struct { } func (p projMultIntervalConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14496,7 +13893,7 @@ func (p projMultIntervalConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14532,12 +13929,6 @@ type projMultIntervalConstDecimalOp struct { } func (p projMultIntervalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14600,7 +13991,7 @@ func (p projMultIntervalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14646,12 +14037,6 @@ type projDivDecimalConstInt16Op struct { } func (p projDivDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14691,9 +14076,9 @@ func (p projDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14715,9 +14100,9 @@ func (p projDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14730,7 +14115,7 @@ func (p projDivDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14743,9 +14128,9 @@ func (p projDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14764,9 +14149,9 @@ func (p projDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14792,12 +14177,6 @@ type projDivDecimalConstInt32Op struct { } func (p projDivDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14837,9 +14216,9 @@ func (p projDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14861,9 +14240,9 @@ func (p projDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14876,7 +14255,7 @@ func (p projDivDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14889,9 +14268,9 @@ func (p projDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14910,9 +14289,9 @@ func (p projDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14938,12 +14317,6 @@ type projDivDecimalConstInt64Op struct { } func (p projDivDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
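The division operators follow the same scratch-removal pattern with one extra step: a zero divisor raises `tree.ErrDivByZero` as an expected error before `tree.DecimalCtx.Quo` runs. A hedged sketch of the Div(decimal const, int col) body shown in these hunks (the helper name is illustrative only):

```go
// quoDecimalByInt sketches the generated Div(decimal const, int col) body.
func quoDecimalByInt(res, constDec *apd.Decimal, arg int64) {
	if arg == 0 {
		colexecerror.ExpectedError(tree.ErrDivByZero)
	}
	var tmpDec apd.Decimal //gcassert:noescape
	tmpDec.SetInt64(arg)
	if _, err := tree.DecimalCtx.Quo(res, constDec, &tmpDec); err != nil {
		colexecerror.ExpectedError(err)
	}
}
```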
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14983,9 +14356,9 @@ func (p projDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15007,9 +14380,9 @@ func (p projDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15022,7 +14395,7 @@ func (p projDivDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15035,9 +14408,9 @@ func (p projDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15056,9 +14429,9 @@ func (p projDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15084,12 +14457,6 @@ type projDivDecimalConstDecimalOp struct { } func (p projDivDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15166,7 +14533,7 @@ func (p projDivDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15226,12 +14593,6 @@ type projDivInt16ConstInt16Op struct { } func (p projDivInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15269,10 +14630,10 @@ func (p projDivInt16ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15292,10 +14653,10 @@ func (p projDivInt16ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15308,7 +14669,7 @@ func (p projDivInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15319,10 +14680,10 @@ func (p projDivInt16ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15339,10 +14700,10 @@ func (p projDivInt16ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15368,12 +14729,6 @@ type projDivInt16ConstInt32Op struct { } func (p projDivInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15411,10 +14766,10 @@ func (p projDivInt16ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15434,10 +14789,10 @@ func (p projDivInt16ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15450,7 +14805,7 @@ func (p projDivInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15461,10 +14816,10 @@ func (p projDivInt16ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15481,10 +14836,10 @@ func (p projDivInt16ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15510,12 +14865,6 @@ type projDivInt16ConstInt64Op struct { } func (p projDivInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15553,10 +14902,10 @@ func (p projDivInt16ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15576,10 +14925,10 @@ func (p projDivInt16ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15592,7 +14941,7 @@ func (p projDivInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15603,10 +14952,10 @@ func (p projDivInt16ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15623,10 +14972,10 @@ func (p projDivInt16ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15652,12 +15001,6 @@ type projDivInt16ConstDecimalOp struct { } func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
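The integer-by-integer division hunks are the two-temporary variant: both operands are widened into stack-local decimals before the quotient is computed. A brief sketch under the same assumptions as above (function name illustrative only):

```go
// quoIntByInt sketches the Div(int const, int col) body from these hunks.
func quoIntByInt(res *apd.Decimal, constArg, arg int64) {
	if arg == 0 {
		colexecerror.ExpectedError(tree.ErrDivByZero)
	}
	var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape
	leftTmpDec.SetInt64(constArg)
	rightTmpDec.SetInt64(arg)
	if _, err := tree.DecimalCtx.Quo(res, &leftTmpDec, &rightTmpDec); err != nil {
		colexecerror.ExpectedError(err)
	}
}
```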
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15697,9 +15040,9 @@ func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -15722,9 +15065,9 @@ func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -15738,7 +15081,7 @@ func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15751,9 +15094,9 @@ func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -15773,9 +15116,9 @@ func (p projDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -15802,12 +15145,6 @@ type projDivInt32ConstInt16Op struct { } func (p projDivInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15845,10 +15182,10 @@ func (p projDivInt32ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15868,10 +15205,10 @@ func (p projDivInt32ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15884,7 +15221,7 @@ func (p projDivInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15895,10 +15232,10 @@ func (p projDivInt32ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15915,10 +15252,10 @@ func (p projDivInt32ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15944,12 +15281,6 @@ type projDivInt32ConstInt32Op struct { } func (p projDivInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15987,10 +15318,10 @@ func (p projDivInt32ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16010,10 +15341,10 @@ func (p projDivInt32ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16026,7 +15357,7 @@ func (p projDivInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16037,10 +15368,10 @@ func (p projDivInt32ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16057,10 +15388,10 @@ func (p projDivInt32ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16086,12 +15417,6 @@ type projDivInt32ConstInt64Op struct { } func (p projDivInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16129,10 +15454,10 @@ func (p projDivInt32ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16152,10 +15477,10 @@ func (p projDivInt32ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16168,7 +15493,7 @@ func (p projDivInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16179,10 +15504,10 @@ func (p projDivInt32ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16199,10 +15524,10 @@ func (p projDivInt32ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16228,12 +15553,6 @@ type projDivInt32ConstDecimalOp struct { } func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16273,9 +15592,9 @@ func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16298,9 +15617,9 @@ func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16314,7 +15633,7 @@ func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16327,9 +15646,9 @@ func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16349,9 +15668,9 @@ func (p projDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16378,12 +15697,6 @@ type projDivInt64ConstInt16Op struct { } func (p projDivInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16421,10 +15734,10 @@ func (p projDivInt64ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16444,10 +15757,10 @@ func (p projDivInt64ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16460,7 +15773,7 @@ func (p projDivInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16471,10 +15784,10 @@ func (p projDivInt64ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16491,10 +15804,10 @@ func (p projDivInt64ConstInt16Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16520,12 +15833,6 @@ type projDivInt64ConstInt32Op struct { } func (p projDivInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16563,10 +15870,10 @@ func (p projDivInt64ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16586,10 +15893,10 @@ func (p projDivInt64ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16602,7 +15909,7 @@ func (p projDivInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16613,10 +15920,10 @@ func (p projDivInt64ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16633,10 +15940,10 @@ func (p projDivInt64ConstInt32Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16662,12 +15969,6 @@ type projDivInt64ConstInt64Op struct { } func (p projDivInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16705,10 +16006,10 @@ func (p projDivInt64ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16728,10 +16029,10 @@ func (p projDivInt64ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16744,7 +16045,7 @@ func (p projDivInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16755,10 +16056,10 @@ func (p projDivInt64ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16775,10 +16076,10 @@ func (p projDivInt64ConstInt64Op) Next() coldata.Batch { if int64(arg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16804,12 +16105,6 @@ type projDivInt64ConstDecimalOp struct { } func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16849,9 +16144,9 @@ func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16874,9 +16169,9 @@ func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16890,7 +16185,7 @@ func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16903,9 +16198,9 @@ func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16925,9 +16220,9 @@ func (p projDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -16954,12 +16249,6 @@ type projDivFloat64ConstFloat64Op struct { } func (p projDivFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17030,7 +16319,7 @@ func (p projDivFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17084,12 +16373,6 @@ type projDivIntervalConstInt64Op struct { } func (p projDivIntervalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17150,7 +16433,7 @@ func (p projDivIntervalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17194,12 +16477,6 @@ type projDivIntervalConstFloat64Op struct { } func (p projDivIntervalConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17260,7 +16537,7 @@ func (p projDivIntervalConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17304,12 +16581,6 @@ type projFloorDivDecimalConstInt16Op struct { } func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17349,9 +16620,9 @@ func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17373,9 +16644,9 @@ func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17388,7 +16659,7 @@ func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17401,9 +16672,9 @@ func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17422,9 +16693,9 @@ func (p projFloorDivDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17450,12 +16721,6 @@ type projFloorDivDecimalConstInt32Op struct { } func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17495,9 +16760,9 @@ func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17519,9 +16784,9 @@ func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17534,7 +16799,7 @@ func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17547,9 +16812,9 @@ func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17568,9 +16833,9 @@ func (p projFloorDivDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17596,12 +16861,6 @@ type projFloorDivDecimalConstInt64Op struct { } func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17641,9 +16900,9 @@ func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17665,9 +16924,9 @@ func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17680,7 +16939,7 @@ func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17693,9 +16952,9 @@ func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17714,9 +16973,9 @@ func (p projFloorDivDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17742,12 +17001,6 @@ type projFloorDivDecimalConstDecimalOp struct { } func (p projFloorDivDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17824,7 +17077,7 @@ func (p projFloorDivDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17884,12 +17137,6 @@ type projFloorDivInt16ConstInt16Op struct { } func (p projFloorDivInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17956,7 +17203,7 @@ func (p projFloorDivInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18006,12 +17253,6 @@ type projFloorDivInt16ConstInt32Op struct { } func (p projFloorDivInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18078,7 +17319,7 @@ func (p projFloorDivInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18128,12 +17369,6 @@ type projFloorDivInt16ConstInt64Op struct { } func (p projFloorDivInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18200,7 +17435,7 @@ func (p projFloorDivInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18250,12 +17485,6 @@ type projFloorDivInt16ConstDecimalOp struct { } func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18295,9 +17524,9 @@ func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18320,9 +17549,9 @@ func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18336,7 +17565,7 @@ func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18349,9 +17578,9 @@ func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18371,9 +17600,9 @@ func (p projFloorDivInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18400,12 +17629,6 @@ type projFloorDivInt32ConstInt16Op struct { } func (p projFloorDivInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18472,7 +17695,7 @@ func (p projFloorDivInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18522,12 +17745,6 @@ type projFloorDivInt32ConstInt32Op struct { } func (p projFloorDivInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18594,7 +17811,7 @@ func (p projFloorDivInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18644,12 +17861,6 @@ type projFloorDivInt32ConstInt64Op struct { } func (p projFloorDivInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18716,7 +17927,7 @@ func (p projFloorDivInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18766,12 +17977,6 @@ type projFloorDivInt32ConstDecimalOp struct { } func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18811,9 +18016,9 @@ func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18836,9 +18041,9 @@ func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18852,7 +18057,7 @@ func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18865,9 +18070,9 @@ func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18887,9 +18092,9 @@ func (p projFloorDivInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -18916,12 +18121,6 @@ type projFloorDivInt64ConstInt16Op struct { } func (p projFloorDivInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18988,7 +18187,7 @@ func (p projFloorDivInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19038,12 +18237,6 @@ type projFloorDivInt64ConstInt32Op struct { } func (p projFloorDivInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19110,7 +18303,7 @@ func (p projFloorDivInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19160,12 +18353,6 @@ type projFloorDivInt64ConstInt64Op struct { } func (p projFloorDivInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19232,7 +18419,7 @@ func (p projFloorDivInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19282,12 +18469,6 @@ type projFloorDivInt64ConstDecimalOp struct { } func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19327,9 +18508,9 @@ func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -19352,9 +18533,9 @@ func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -19368,7 +18549,7 @@ func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19381,9 +18562,9 @@ func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -19403,9 +18584,9 @@ func (p projFloorDivInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -19432,12 +18613,6 @@ type projFloorDivFloat64ConstFloat64Op struct { } func (p projFloorDivFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19508,7 +18683,7 @@ func (p projFloorDivFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19562,12 +18737,6 @@ type projModDecimalConstInt16Op struct { } func (p projModDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19607,9 +18776,9 @@ func (p projModDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19631,9 +18800,9 @@ func (p projModDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19646,7 +18815,7 @@ func (p projModDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19659,9 +18828,9 @@ func (p projModDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19680,9 +18849,9 @@ func (p projModDecimalConstInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19708,12 +18877,6 @@ type projModDecimalConstInt32Op struct { } func (p projModDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19753,9 +18916,9 @@ func (p projModDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19777,9 +18940,9 @@ func (p projModDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19792,7 +18955,7 @@ func (p projModDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19805,9 +18968,9 @@ func (p projModDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19826,9 +18989,9 @@ func (p projModDecimalConstInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19854,12 +19017,6 @@ type projModDecimalConstInt64Op struct { } func (p projModDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19899,9 +19056,9 @@ func (p projModDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19923,9 +19080,9 @@ func (p projModDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19938,7 +19095,7 @@ func (p projModDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19951,9 +19108,9 @@ func (p projModDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19972,9 +19129,9 @@ func (p projModDecimalConstInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20000,12 +19157,6 @@ type projModDecimalConstDecimalOp struct { } func (p projModDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20082,7 +19233,7 @@ func (p projModDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20142,12 +19293,6 @@ type projModInt16ConstInt16Op struct { } func (p projModInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20214,7 +19359,7 @@ func (p projModInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20264,12 +19409,6 @@ type projModInt16ConstInt32Op struct { } func (p projModInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20336,7 +19475,7 @@ func (p projModInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20386,12 +19525,6 @@ type projModInt16ConstInt64Op struct { } func (p projModInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20458,7 +19591,7 @@ func (p projModInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20508,12 +19641,6 @@ type projModInt16ConstDecimalOp struct { } func (p projModInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20553,9 +19680,9 @@ func (p projModInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -20578,9 +19705,9 @@ func (p projModInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -20594,7 +19721,7 @@ func (p projModInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20607,9 +19734,9 @@ func (p projModInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -20629,9 +19756,9 @@ func (p projModInt16ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -20658,12 +19785,6 @@ type projModInt32ConstInt16Op struct { } func (p projModInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20730,7 +19851,7 @@ func (p projModInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20780,12 +19901,6 @@ type projModInt32ConstInt32Op struct { } func (p projModInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20852,7 +19967,7 @@ func (p projModInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20902,12 +20017,6 @@ type projModInt32ConstInt64Op struct { } func (p projModInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20974,7 +20083,7 @@ func (p projModInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21024,12 +20133,6 @@ type projModInt32ConstDecimalOp struct { } func (p projModInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21069,9 +20172,9 @@ func (p projModInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21094,9 +20197,9 @@ func (p projModInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21110,7 +20213,7 @@ func (p projModInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21123,9 +20226,9 @@ func (p projModInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21145,9 +20248,9 @@ func (p projModInt32ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21174,12 +20277,6 @@ type projModInt64ConstInt16Op struct { } func (p projModInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21246,7 +20343,7 @@ func (p projModInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21296,12 +20393,6 @@ type projModInt64ConstInt32Op struct { } func (p projModInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21368,7 +20459,7 @@ func (p projModInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21418,12 +20509,6 @@ type projModInt64ConstInt64Op struct { } func (p projModInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21490,7 +20575,7 @@ func (p projModInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21540,12 +20625,6 @@ type projModInt64ConstDecimalOp struct { } func (p projModInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21585,9 +20664,9 @@ func (p projModInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21610,9 +20689,9 @@ func (p projModInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21626,7 +20705,7 @@ func (p projModInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21639,9 +20718,9 @@ func (p projModInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21661,9 +20740,9 @@ func (p projModInt64ConstDecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -21690,12 +20769,6 @@ type projModFloat64ConstFloat64Op struct { } func (p projModFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21766,7 +20839,7 @@ func (p projModFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21820,12 +20893,6 @@ type projPowDecimalConstInt16Op struct { } func (p projPowDecimalConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21861,9 +20928,9 @@ func (p projPowDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21881,9 +20948,9 @@ func (p projPowDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21896,7 +20963,7 @@ func (p projPowDecimalConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21905,9 +20972,9 @@ func (p projPowDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21922,9 +20989,9 @@ func (p projPowDecimalConstInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21950,12 +21017,6 @@ type projPowDecimalConstInt32Op struct { } func (p projPowDecimalConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21991,9 +21052,9 @@ func (p projPowDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22011,9 +21072,9 @@ func (p projPowDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22026,7 +21087,7 @@ func (p projPowDecimalConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22035,9 +21096,9 @@ func (p projPowDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22052,9 +21113,9 @@ func (p projPowDecimalConstInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22080,12 +21141,6 @@ type projPowDecimalConstInt64Op struct { } func (p projPowDecimalConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22121,9 +21176,9 @@ func (p projPowDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22141,9 +21196,9 @@ func (p projPowDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22156,7 +21211,7 @@ func (p projPowDecimalConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22165,9 +21220,9 @@ func (p projPowDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22182,9 +21237,9 @@ func (p projPowDecimalConstInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &p.constArg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22210,12 +21265,6 @@ type projPowDecimalConstDecimalOp struct { } func (p projPowDecimalConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22284,7 +21333,7 @@ func (p projPowDecimalConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22336,12 +21385,6 @@ type projPowInt16ConstInt16Op struct { } func (p projPowInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22376,10 +21419,10 @@ func (p projPowInt16ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22401,10 +21444,10 @@ func (p projPowInt16ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22422,7 +21465,7 @@ func (p projPowInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22430,10 +21473,10 @@ func (p projPowInt16ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22452,10 +21495,10 @@ func (p projPowInt16ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22486,12 +21529,6 @@ type projPowInt16ConstInt32Op struct { } func (p projPowInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22526,10 +21563,10 @@ func (p projPowInt16ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22551,10 +21588,10 @@ func (p projPowInt16ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22572,7 +21609,7 @@ func (p projPowInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22580,10 +21617,10 @@ func (p projPowInt16ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22602,10 +21639,10 @@ func (p projPowInt16ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22636,12 +21673,6 @@ type projPowInt16ConstInt64Op struct { } func (p projPowInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22676,10 +21707,10 @@ func (p projPowInt16ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22701,10 +21732,10 @@ func (p projPowInt16ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22722,7 +21753,7 @@ func (p projPowInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22730,10 +21761,10 @@ func (p projPowInt16ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22752,10 +21783,10 @@ func (p projPowInt16ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22786,12 +21817,6 @@ type projPowInt16ConstDecimalOp struct { } func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22827,9 +21852,9 @@ func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -22848,9 +21873,9 @@ func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -22864,7 +21889,7 @@ func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22873,9 +21898,9 @@ func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -22891,9 +21916,9 @@ func (p projPowInt16ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -22920,12 +21945,6 @@ type projPowInt32ConstInt16Op struct { } func (p projPowInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22960,10 +21979,10 @@ func (p projPowInt32ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22985,10 +22004,10 @@ func (p projPowInt32ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23006,7 +22025,7 @@ func (p projPowInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23014,10 +22033,10 @@ func (p projPowInt32ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23036,10 +22055,10 @@ func (p projPowInt32ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23070,12 +22089,6 @@ type projPowInt32ConstInt32Op struct { } func (p projPowInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23110,10 +22123,10 @@ func (p projPowInt32ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23135,10 +22148,10 @@ func (p projPowInt32ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23156,7 +22169,7 @@ func (p projPowInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23164,10 +22177,10 @@ func (p projPowInt32ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23186,10 +22199,10 @@ func (p projPowInt32ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23220,12 +22233,6 @@ type projPowInt32ConstInt64Op struct { } func (p projPowInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23260,10 +22267,10 @@ func (p projPowInt32ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23285,10 +22292,10 @@ func (p projPowInt32ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23306,7 +22313,7 @@ func (p projPowInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23314,10 +22321,10 @@ func (p projPowInt32ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23336,10 +22343,10 @@ func (p projPowInt32ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23370,12 +22377,6 @@ type projPowInt32ConstDecimalOp struct { } func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23411,9 +22412,9 @@ func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -23432,9 +22433,9 @@ func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -23448,7 +22449,7 @@ func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23457,9 +22458,9 @@ func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -23475,9 +22476,9 @@ func (p projPowInt32ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -23504,12 +22505,6 @@ type projPowInt64ConstInt16Op struct { } func (p projPowInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23544,10 +22539,10 @@ func (p projPowInt64ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23569,10 +22564,10 @@ func (p projPowInt64ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23590,7 +22585,7 @@ func (p projPowInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23598,10 +22593,10 @@ func (p projPowInt64ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23620,10 +22615,10 @@ func (p projPowInt64ConstInt16Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23654,12 +22649,6 @@ type projPowInt64ConstInt32Op struct { } func (p projPowInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23694,10 +22683,10 @@ func (p projPowInt64ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23719,10 +22708,10 @@ func (p projPowInt64ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23740,7 +22729,7 @@ func (p projPowInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23748,10 +22737,10 @@ func (p projPowInt64ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23770,10 +22759,10 @@ func (p projPowInt64ConstInt32Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23804,12 +22793,6 @@ type projPowInt64ConstInt64Op struct { } func (p projPowInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23844,10 +22827,10 @@ func (p projPowInt64ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23869,10 +22852,10 @@ func (p projPowInt64ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23890,7 +22873,7 @@ func (p projPowInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23898,10 +22881,10 @@ func (p projPowInt64ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23920,10 +22903,10 @@ func (p projPowInt64ConstInt64Op) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(p.constArg))) rightTmpDec.SetInt64(int64(int64(arg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23954,12 +22937,6 @@ type projPowInt64ConstDecimalOp struct { } func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23995,9 +22972,9 @@ func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -24016,9 +22993,9 @@ func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -24032,7 +23009,7 @@ func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24041,9 +23018,9 @@ func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -24059,9 +23036,9 @@ func (p projPowInt64ConstDecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg) if err != nil { colexecerror.ExpectedError(err) } @@ -24088,12 +23065,6 @@ type projPowFloat64ConstFloat64Op struct { } func (p projPowFloat64ConstFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24156,7 +23127,7 @@ func (p projPowFloat64ConstFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24202,12 +23173,6 @@ type projConcatBytesConstBytesOp struct { } func (p projConcatBytesConstBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24273,7 +23238,7 @@ func (p projConcatBytesConstBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24322,12 +23287,6 @@ type projConcatJSONConstJSONOp struct { } func (p projConcatJSONConstJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24391,7 +23350,7 @@ func (p projConcatJSONConstJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24434,16 +23393,15 @@ func (p projConcatJSONConstJSONOp) Next() coldata.Batch { type projConcatDatumConstDatumOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projConcatDatumConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24513,7 +23471,7 @@ func (p projConcatDatumConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24566,12 +23524,6 @@ type projLShiftInt16ConstInt16Op struct { } func (p projLShiftInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24640,7 +23592,7 @@ func (p projLShiftInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24692,12 +23644,6 @@ type projLShiftInt16ConstInt32Op struct { } func (p projLShiftInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24766,7 +23712,7 @@ func (p projLShiftInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24818,12 +23764,6 @@ type projLShiftInt16ConstInt64Op struct { } func (p projLShiftInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24892,7 +23832,7 @@ func (p projLShiftInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24944,12 +23884,6 @@ type projLShiftInt32ConstInt16Op struct { } func (p projLShiftInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25018,7 +23952,7 @@ func (p projLShiftInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25070,12 +24004,6 @@ type projLShiftInt32ConstInt32Op struct { } func (p projLShiftInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
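For the datum-backed variants (for example `projConcatDatumConstDatumOp` above and the `projLShiftDatumConst*Op` operators below), the helper does not disappear: it is narrowed to `execgen.BinaryOverloadHelper`, embedded directly in the operator struct, and only those `Next` bodies bind `_overloadHelper := p.BinaryOverloadHelper`. A self-contained sketch of that embedding shape, with placeholder types standing in for the execgen/colexec ones:

```go
package main

import "fmt"

// Placeholder stand-ins for execgen.BinaryOverloadHelper and projConstOpBase;
// the field names are illustrative only.
type binaryOverloadHelper struct{ binFnName string }

type opBase struct{ colIdx, outputIdx int }

// Datum variants embed the helper, so only they pay for carrying it.
type projDatumConstDatumOp struct {
	opBase
	binaryOverloadHelper
}

// Native-type variants carry no helper at all.
type projInt64ConstInt64Op struct {
	opBase
	constArg int64
}

func (p projDatumConstDatumOp) next() {
	// Mirrors `_overloadHelper := p.BinaryOverloadHelper` in the generated Next.
	_overloadHelper := p.binaryOverloadHelper
	fmt.Println("datum op using", _overloadHelper.binFnName)
}

func main() {
	op := projDatumConstDatumOp{
		opBase:               opBase{colIdx: 0, outputIdx: 1},
		binaryOverloadHelper: binaryOverloadHelper{binFnName: "concat"},
	}
	op.next()
	_ = projInt64ConstInt64Op{opBase: opBase{}, constArg: 3}
}
```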
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25144,7 +24072,7 @@ func (p projLShiftInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25196,12 +24124,6 @@ type projLShiftInt32ConstInt64Op struct { } func (p projLShiftInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25270,7 +24192,7 @@ func (p projLShiftInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25322,12 +24244,6 @@ type projLShiftInt64ConstInt16Op struct { } func (p projLShiftInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25396,7 +24312,7 @@ func (p projLShiftInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25448,12 +24364,6 @@ type projLShiftInt64ConstInt32Op struct { } func (p projLShiftInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25522,7 +24432,7 @@ func (p projLShiftInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25574,12 +24484,6 @@ type projLShiftInt64ConstInt64Op struct { } func (p projLShiftInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25648,7 +24552,7 @@ func (p projLShiftInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25696,16 +24600,15 @@ func (p projLShiftInt64ConstInt64Op) Next() coldata.Batch { type projLShiftDatumConstInt16Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projLShiftDatumConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25783,7 +24686,7 @@ func (p projLShiftDatumConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25840,16 +24743,15 @@ func (p projLShiftDatumConstInt16Op) Next() coldata.Batch { type projLShiftDatumConstInt32Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projLShiftDatumConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25927,7 +24829,7 @@ func (p projLShiftDatumConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25984,16 +24886,15 @@ func (p projLShiftDatumConstInt32Op) Next() coldata.Batch { type projLShiftDatumConstInt64Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projLShiftDatumConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26071,7 +24972,7 @@ func (p projLShiftDatumConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26132,12 +25033,6 @@ type projRShiftInt16ConstInt16Op struct { } func (p projRShiftInt16ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26206,7 +25101,7 @@ func (p projRShiftInt16ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26258,12 +25153,6 @@ type projRShiftInt16ConstInt32Op struct { } func (p projRShiftInt16ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26332,7 +25221,7 @@ func (p projRShiftInt16ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26384,12 +25273,6 @@ type projRShiftInt16ConstInt64Op struct { } func (p projRShiftInt16ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26458,7 +25341,7 @@ func (p projRShiftInt16ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26510,12 +25393,6 @@ type projRShiftInt32ConstInt16Op struct { } func (p projRShiftInt32ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26584,7 +25461,7 @@ func (p projRShiftInt32ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26636,12 +25513,6 @@ type projRShiftInt32ConstInt32Op struct { } func (p projRShiftInt32ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26710,7 +25581,7 @@ func (p projRShiftInt32ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26762,12 +25633,6 @@ type projRShiftInt32ConstInt64Op struct { } func (p projRShiftInt32ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26836,7 +25701,7 @@ func (p projRShiftInt32ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26888,12 +25753,6 @@ type projRShiftInt64ConstInt16Op struct { } func (p projRShiftInt64ConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26962,7 +25821,7 @@ func (p projRShiftInt64ConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27014,12 +25873,6 @@ type projRShiftInt64ConstInt32Op struct { } func (p projRShiftInt64ConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27088,7 +25941,7 @@ func (p projRShiftInt64ConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27140,12 +25993,6 @@ type projRShiftInt64ConstInt64Op struct { } func (p projRShiftInt64ConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27214,7 +26061,7 @@ func (p projRShiftInt64ConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27262,16 +26109,15 @@ func (p projRShiftInt64ConstInt64Op) Next() coldata.Batch { type projRShiftDatumConstInt16Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projRShiftDatumConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27349,7 +26195,7 @@ func (p projRShiftDatumConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27406,16 +26252,15 @@ func (p projRShiftDatumConstInt16Op) Next() coldata.Batch { type projRShiftDatumConstInt32Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projRShiftDatumConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27493,7 +26338,7 @@ func (p projRShiftDatumConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27550,16 +26395,15 @@ func (p projRShiftDatumConstInt32Op) Next() coldata.Batch { type projRShiftDatumConstInt64Op struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projRShiftDatumConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27637,7 +26481,7 @@ func (p projRShiftDatumConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27698,12 +26542,6 @@ type projJSONFetchValJSONConstBytesOp struct { } func (p projJSONFetchValJSONConstBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27779,7 +26617,7 @@ func (p projJSONFetchValJSONConstBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27838,12 +26676,6 @@ type projJSONFetchValJSONConstInt16Op struct { } func (p projJSONFetchValJSONConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27913,7 +26745,7 @@ func (p projJSONFetchValJSONConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27966,12 +26798,6 @@ type projJSONFetchValJSONConstInt32Op struct { } func (p projJSONFetchValJSONConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28041,7 +26867,7 @@ func (p projJSONFetchValJSONConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28094,12 +26920,6 @@ type projJSONFetchValJSONConstInt64Op struct { } func (p projJSONFetchValJSONConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28169,7 +26989,7 @@ func (p projJSONFetchValJSONConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28222,12 +27042,6 @@ type projJSONFetchTextJSONConstBytesOp struct { } func (p projJSONFetchTextJSONConstBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28321,7 +27135,7 @@ func (p projJSONFetchTextJSONConstBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28398,12 +27212,6 @@ type projJSONFetchTextJSONConstInt16Op struct { } func (p projJSONFetchTextJSONConstInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28491,7 +27299,7 @@ func (p projJSONFetchTextJSONConstInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28562,12 +27370,6 @@ type projJSONFetchTextJSONConstInt32Op struct { } func (p projJSONFetchTextJSONConstInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28655,7 +27457,7 @@ func (p projJSONFetchTextJSONConstInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28726,12 +27528,6 @@ type projJSONFetchTextJSONConstInt64Op struct { } func (p projJSONFetchTextJSONConstInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28819,7 +27615,7 @@ func (p projJSONFetchTextJSONConstInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28890,12 +27686,6 @@ type projJSONFetchValPathJSONConstDatumOp struct { } func (p projJSONFetchValPathJSONConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28965,7 +27755,7 @@ func (p projJSONFetchValPathJSONConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29018,12 +27808,6 @@ type projJSONFetchTextPathJSONConstDatumOp struct { } func (p projJSONFetchTextPathJSONConstDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29113,7 +27897,7 @@ func (p projJSONFetchTextPathJSONConstDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
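The `GetProjectionLConstOperator` hunks that follow change the constructor in the same spirit: each case now builds the operator into an `op` variable and returns it, and `execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx}` is assigned only on the datum-handling branches instead of being placed in every `projConstOpBase`. A compressed, self-contained sketch of that control flow; the types and the `newProjOp` name are placeholders, not the generated identifiers:

```go
package main

import "fmt"

type helper struct{ binFnName string }

type intOp struct{ constArg int64 }

type datumOp struct {
	helper
	constArg interface{}
}

// newProjOp sketches the constructor shape used in the hunks below: build the
// operator into a local, attach the helper only where the overload needs it,
// then return the local.
func newProjOp(useDatum bool, h helper) interface{} {
	if useDatum {
		op := &datumOp{constArg: "x"}
		op.helper = h // only datum variants get the helper
		return op
	}
	op := &intOp{constArg: 1}
	return op
}

func main() {
	fmt.Printf("%T %T\n", newProjOp(true, helper{binFnName: "plus"}), newProjOp(false, helper{}))
}
```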
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29202,7 +27986,6 @@ func GetProjectionLConstOperator( allocator: allocator, colIdx: colIdx, outputIdx: outputIdx, - overloadHelper: execgen.OverloadHelper{BinFn: binFn, EvalCtx: evalCtx}, } c := colconv.GetDatumToPhysicalFn(constType)(constArg) leftType, rightType := constType, inputTypes[colIdx] @@ -29218,21 +28001,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt16ConstInt16Op{ + op := &projBitandInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitandInt16ConstInt32Op{ + op := &projBitandInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projBitandInt16ConstInt64Op{ + op := &projBitandInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -29240,21 +28026,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt32ConstInt16Op{ + op := &projBitandInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projBitandInt32ConstInt32Op{ + op := &projBitandInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitandInt32ConstInt64Op{ + op := &projBitandInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -29263,21 +28052,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt64ConstInt16Op{ + op := &projBitandInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projBitandInt64ConstInt32Op{ + op := &projBitandInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projBitandInt64ConstInt64Op{ + op := &projBitandInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -29290,10 +28082,12 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projBitandDatumConstDatumOp{ + op := &projBitandDatumConstDatumOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29307,21 +28101,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt16ConstInt16Op{ + op := &projBitorInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitorInt16ConstInt32Op{ + op := &projBitorInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projBitorInt16ConstInt64Op{ + op := &projBitorInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -29329,21 +28126,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt32ConstInt16Op{ + op := 
&projBitorInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projBitorInt32ConstInt32Op{ + op := &projBitorInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitorInt32ConstInt64Op{ + op := &projBitorInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -29352,21 +28152,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt64ConstInt16Op{ + op := &projBitorInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projBitorInt64ConstInt32Op{ + op := &projBitorInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projBitorInt64ConstInt64Op{ + op := &projBitorInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -29379,10 +28182,12 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projBitorDatumConstDatumOp{ + op := &projBitorDatumConstDatumOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29396,21 +28201,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt16ConstInt16Op{ + op := &projBitxorInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitxorInt16ConstInt32Op{ + op := &projBitxorInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt16ConstInt64Op{ + op := &projBitxorInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -29418,21 +28226,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt32ConstInt16Op{ + op := &projBitxorInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projBitxorInt32ConstInt32Op{ + op := &projBitxorInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt32ConstInt64Op{ + op := &projBitxorInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -29441,21 +28252,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt64ConstInt16Op{ + op := &projBitxorInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projBitxorInt64ConstInt32Op{ + op := &projBitxorInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt64ConstInt64Op{ + op := &projBitxorInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -29468,10 +28282,12 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projBitxorDatumConstDatumOp{ + op := 
&projBitxorDatumConstDatumOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29486,30 +28302,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDecimalConstInt16Op{ + op := &projPlusDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projPlusDecimalConstInt32Op{ + op := &projPlusDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projPlusDecimalConstInt64Op{ + op := &projPlusDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusDecimalConstDecimalOp{ + op := &projPlusDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -29520,39 +28340,45 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt16ConstInt16Op{ + op := &projPlusInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPlusInt16ConstInt32Op{ + op := &projPlusInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projPlusInt16ConstInt64Op{ + op := &projPlusInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16ConstDecimalOp{ + op := &projPlusInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16ConstDatumOp{ + op := &projPlusInt16ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -29560,39 +28386,45 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt32ConstInt16Op{ + op := &projPlusInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projPlusInt32ConstInt32Op{ + op := &projPlusInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPlusInt32ConstInt64Op{ + op := &projPlusInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32ConstDecimalOp{ + op := &projPlusInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32ConstDatumOp{ + op := &projPlusInt32ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -29601,39 +28433,45 @@ func 
GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt64ConstInt16Op{ + op := &projPlusInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projPlusInt64ConstInt32Op{ + op := &projPlusInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projPlusInt64ConstInt64Op{ + op := &projPlusInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64ConstDecimalOp{ + op := &projPlusInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64ConstDatumOp{ + op := &projPlusInt64ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29646,10 +28484,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projPlusFloat64ConstFloat64Op{ + op := &projPlusFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -29662,10 +28501,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projPlusTimestampConstIntervalOp{ + op := &projPlusTimestampConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(time.Time), - }, nil + } + return op, nil } } } @@ -29678,28 +28518,32 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projPlusIntervalConstTimestampOp{ + op := &projPlusIntervalConstTimestampOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalConstIntervalOp{ + op := &projPlusIntervalConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalConstDatumOp{ + op := &projPlusIntervalConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29712,29 +28556,37 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projPlusDatumConstIntervalOp{ + op := &projPlusDatumConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDatumConstInt16Op{ + op := &projPlusDatumConstInt16Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projPlusDatumConstInt32Op{ + op := &projPlusDatumConstInt32Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, 
nil case -1: default: - return &projPlusDatumConstInt64Op{ + op := &projPlusDatumConstInt64Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29749,30 +28601,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDecimalConstInt16Op{ + op := &projMinusDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projMinusDecimalConstInt32Op{ + op := &projMinusDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projMinusDecimalConstInt64Op{ + op := &projMinusDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusDecimalConstDecimalOp{ + op := &projMinusDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -29783,39 +28639,45 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt16ConstInt16Op{ + op := &projMinusInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusInt16ConstInt32Op{ + op := &projMinusInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projMinusInt16ConstInt64Op{ + op := &projMinusInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16ConstDecimalOp{ + op := &projMinusInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16ConstDatumOp{ + op := &projMinusInt16ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -29823,39 +28685,45 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt32ConstInt16Op{ + op := &projMinusInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projMinusInt32ConstInt32Op{ + op := &projMinusInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusInt32ConstInt64Op{ + op := &projMinusInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32ConstDecimalOp{ + op := &projMinusInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32ConstDatumOp{ + op := &projMinusInt32ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: 
binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -29864,39 +28732,45 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt64ConstInt16Op{ + op := &projMinusInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projMinusInt64ConstInt32Op{ + op := &projMinusInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projMinusInt64ConstInt64Op{ + op := &projMinusInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64ConstDecimalOp{ + op := &projMinusInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64ConstDatumOp{ + op := &projMinusInt64ConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29909,10 +28783,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMinusFloat64ConstFloat64Op{ + op := &projMinusFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -29925,19 +28800,21 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMinusTimestampConstTimestampOp{ + op := &projMinusTimestampConstTimestampOp{ projConstOpBase: projConstOpBase, constArg: c.(time.Time), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusTimestampConstIntervalOp{ + op := &projMinusTimestampConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(time.Time), - }, nil + } + return op, nil } } } @@ -29950,19 +28827,22 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMinusIntervalConstIntervalOp{ + op := &projMinusIntervalConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusIntervalConstDatumOp{ + op := &projMinusIntervalConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -29975,29 +28855,33 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMinusJSONConstBytesOp{ + op := &projMinusJSONConstBytesOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusJSONConstInt16Op{ + op := &projMinusJSONConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case 32: - return &projMinusJSONConstInt32Op{ + op := &projMinusJSONConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case -1: default: - return &projMinusJSONConstInt64Op{ + op := &projMinusJSONConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, 
nil + } + return op, nil } } } @@ -30010,47 +28894,59 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMinusDatumConstDatumOp{ + op := &projMinusDatumConstDatumOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumConstIntervalOp{ + op := &projMinusDatumConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.BytesFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumConstBytesOp{ + op := &projMinusDatumConstBytesOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDatumConstInt16Op{ + op := &projMinusDatumConstInt16Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projMinusDatumConstInt32Op{ + op := &projMinusDatumConstInt32Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projMinusDatumConstInt64Op{ + op := &projMinusDatumConstInt64Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -30065,39 +28961,44 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultDecimalConstInt16Op{ + op := &projMultDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projMultDecimalConstInt32Op{ + op := &projMultDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projMultDecimalConstInt64Op{ + op := &projMultDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalConstDecimalOp{ + op := &projMultDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalConstIntervalOp{ + op := &projMultDecimalConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -30108,39 +29009,44 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt16ConstInt16Op{ + op := &projMultInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultInt16ConstInt32Op{ + op := &projMultInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projMultInt16ConstInt64Op{ + op := &projMultInt16ConstInt64Op{ 
projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16ConstDecimalOp{ + op := &projMultInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16ConstIntervalOp{ + op := &projMultInt16ConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -30148,39 +29054,44 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt32ConstInt16Op{ + op := &projMultInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projMultInt32ConstInt32Op{ + op := &projMultInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultInt32ConstInt64Op{ + op := &projMultInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32ConstDecimalOp{ + op := &projMultInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32ConstIntervalOp{ + op := &projMultInt32ConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -30189,39 +29100,44 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt64ConstInt16Op{ + op := &projMultInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projMultInt64ConstInt32Op{ + op := &projMultInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projMultInt64ConstInt64Op{ + op := &projMultInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64ConstDecimalOp{ + op := &projMultInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64ConstIntervalOp{ + op := &projMultInt64ConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -30234,19 +29150,21 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projMultFloat64ConstFloat64Op{ + op := &projMultFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultFloat64ConstIntervalOp{ + op := &projMultFloat64ConstIntervalOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -30258,39 +29176,44 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultIntervalConstInt16Op{ + op := &projMultIntervalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: 
c.(duration.Duration), - }, nil + } + return op, nil case 32: - return &projMultIntervalConstInt32Op{ + op := &projMultIntervalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil case -1: default: - return &projMultIntervalConstInt64Op{ + op := &projMultIntervalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalConstFloat64Op{ + op := &projMultIntervalConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalConstDecimalOp{ + op := &projMultIntervalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -30305,30 +29228,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivDecimalConstInt16Op{ + op := &projDivDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projDivDecimalConstInt32Op{ + op := &projDivDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projDivDecimalConstInt64Op{ + op := &projDivDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivDecimalConstDecimalOp{ + op := &projDivDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -30339,30 +29266,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt16ConstInt16Op{ + op := &projDivInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projDivInt16ConstInt32Op{ + op := &projDivInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projDivInt16ConstInt64Op{ + op := &projDivInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt16ConstDecimalOp{ + op := &projDivInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -30370,30 +29301,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt32ConstInt16Op{ + op := &projDivInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projDivInt32ConstInt32Op{ + op := &projDivInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projDivInt32ConstInt64Op{ + op := &projDivInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt32ConstDecimalOp{ + op := &projDivInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ 
-30402,30 +29337,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt64ConstInt16Op{ + op := &projDivInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projDivInt64ConstInt32Op{ + op := &projDivInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projDivInt64ConstInt64Op{ + op := &projDivInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt64ConstDecimalOp{ + op := &projDivInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -30438,10 +29377,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projDivFloat64ConstFloat64Op{ + op := &projDivFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -30454,19 +29394,21 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projDivIntervalConstInt64Op{ + op := &projDivIntervalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projDivIntervalConstFloat64Op{ + op := &projDivIntervalConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -30481,30 +29423,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivDecimalConstInt16Op{ + op := &projFloorDivDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projFloorDivDecimalConstInt32Op{ + op := &projFloorDivDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projFloorDivDecimalConstInt64Op{ + op := &projFloorDivDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivDecimalConstDecimalOp{ + op := &projFloorDivDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -30515,30 +29461,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt16ConstInt16Op{ + op := &projFloorDivInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projFloorDivInt16ConstInt32Op{ + op := &projFloorDivInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt16ConstInt64Op{ + op := &projFloorDivInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt16ConstDecimalOp{ + op := &projFloorDivInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -30546,30 +29496,34 @@ func GetProjectionLConstOperator( 
case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt32ConstInt16Op{ + op := &projFloorDivInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projFloorDivInt32ConstInt32Op{ + op := &projFloorDivInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt32ConstInt64Op{ + op := &projFloorDivInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt32ConstDecimalOp{ + op := &projFloorDivInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -30578,30 +29532,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt64ConstInt16Op{ + op := &projFloorDivInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projFloorDivInt64ConstInt32Op{ + op := &projFloorDivInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt64ConstInt64Op{ + op := &projFloorDivInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt64ConstDecimalOp{ + op := &projFloorDivInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -30614,10 +29572,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projFloorDivFloat64ConstFloat64Op{ + op := &projFloorDivFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -30632,30 +29591,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModDecimalConstInt16Op{ + op := &projModDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projModDecimalConstInt32Op{ + op := &projModDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projModDecimalConstInt64Op{ + op := &projModDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModDecimalConstDecimalOp{ + op := &projModDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -30666,30 +29629,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt16ConstInt16Op{ + op := &projModInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projModInt16ConstInt32Op{ + op := &projModInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projModInt16ConstInt64Op{ + op := &projModInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: 
switch rightType.Width() { case -1: default: - return &projModInt16ConstDecimalOp{ + op := &projModInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -30697,30 +29664,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt32ConstInt16Op{ + op := &projModInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projModInt32ConstInt32Op{ + op := &projModInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projModInt32ConstInt64Op{ + op := &projModInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt32ConstDecimalOp{ + op := &projModInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -30729,30 +29700,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt64ConstInt16Op{ + op := &projModInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projModInt64ConstInt32Op{ + op := &projModInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projModInt64ConstInt64Op{ + op := &projModInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt64ConstDecimalOp{ + op := &projModInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -30765,10 +29740,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projModFloat64ConstFloat64Op{ + op := &projModFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -30783,30 +29759,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowDecimalConstInt16Op{ + op := &projPowDecimalConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case 32: - return &projPowDecimalConstInt32Op{ + op := &projPowDecimalConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil case -1: default: - return &projPowDecimalConstInt64Op{ + op := &projPowDecimalConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowDecimalConstDecimalOp{ + op := &projPowDecimalConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -30817,30 +29797,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt16ConstInt16Op{ + op := &projPowInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPowInt16ConstInt32Op{ + op := &projPowInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: 
- return &projPowInt16ConstInt64Op{ + op := &projPowInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt16ConstDecimalOp{ + op := &projPowInt16ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -30848,30 +29832,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt32ConstInt16Op{ + op := &projPowInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projPowInt32ConstInt32Op{ + op := &projPowInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPowInt32ConstInt64Op{ + op := &projPowInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt32ConstDecimalOp{ + op := &projPowInt32ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -30880,30 +29868,34 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt64ConstInt16Op{ + op := &projPowInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projPowInt64ConstInt32Op{ + op := &projPowInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projPowInt64ConstInt64Op{ + op := &projPowInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt64ConstDecimalOp{ + op := &projPowInt64ConstDecimalOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -30916,10 +29908,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projPowFloat64ConstFloat64Op{ + op := &projPowFloat64ConstFloat64Op{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -30935,10 +29928,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projConcatBytesConstBytesOp{ + op := &projConcatBytesConstBytesOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + return op, nil } } } @@ -30951,10 +29945,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projConcatJSONConstJSONOp{ + op := &projConcatJSONConstJSONOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } @@ -30967,10 +29962,12 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projConcatDatumConstDatumOp{ + op := &projConcatDatumConstDatumOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -30984,21 +29981,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt16ConstInt16Op{ + op := &projLShiftInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, 
nil + } + return op, nil case 32: - return &projLShiftInt16ConstInt32Op{ + op := &projLShiftInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt16ConstInt64Op{ + op := &projLShiftInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -31006,21 +30006,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt32ConstInt16Op{ + op := &projLShiftInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projLShiftInt32ConstInt32Op{ + op := &projLShiftInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt32ConstInt64Op{ + op := &projLShiftInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -31029,21 +30032,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt64ConstInt16Op{ + op := &projLShiftInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projLShiftInt64ConstInt32Op{ + op := &projLShiftInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt64ConstInt64Op{ + op := &projLShiftInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -31055,21 +30061,27 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftDatumConstInt16Op{ + op := &projLShiftDatumConstInt16Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projLShiftDatumConstInt32Op{ + op := &projLShiftDatumConstInt32Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projLShiftDatumConstInt64Op{ + op := &projLShiftDatumConstInt64Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -31083,21 +30095,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt16ConstInt16Op{ + op := &projRShiftInt16ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projRShiftInt16ConstInt32Op{ + op := &projRShiftInt16ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt16ConstInt64Op{ + op := &projRShiftInt16ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil } } case 32: @@ -31105,21 +30120,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt32ConstInt16Op{ + op := &projRShiftInt32ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case 32: - return &projRShiftInt32ConstInt32Op{ + op := 
&projRShiftInt32ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt32ConstInt64Op{ + op := &projRShiftInt32ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil } } case -1: @@ -31128,21 +30146,24 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt64ConstInt16Op{ + op := &projRShiftInt64ConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case 32: - return &projRShiftInt64ConstInt32Op{ + op := &projRShiftInt64ConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt64ConstInt64Op{ + op := &projRShiftInt64ConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -31154,21 +30175,27 @@ func GetProjectionLConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftDatumConstInt16Op{ + op := &projRShiftDatumConstInt16Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projRShiftDatumConstInt32Op{ + op := &projRShiftDatumConstInt32Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projRShiftDatumConstInt64Op{ + op := &projRShiftDatumConstInt64Op{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -31184,29 +30211,33 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValJSONConstBytesOp{ + op := &projJSONFetchValJSONConstBytesOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchValJSONConstInt16Op{ + op := &projJSONFetchValJSONConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case 32: - return &projJSONFetchValJSONConstInt32Op{ + op := &projJSONFetchValJSONConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case -1: default: - return &projJSONFetchValJSONConstInt64Op{ + op := &projJSONFetchValJSONConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } @@ -31222,29 +30253,33 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextJSONConstBytesOp{ + op := &projJSONFetchTextJSONConstBytesOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchTextJSONConstInt16Op{ + op := &projJSONFetchTextJSONConstInt16Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case 32: - return &projJSONFetchTextJSONConstInt32Op{ + op := &projJSONFetchTextJSONConstInt32Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil case -1: default: - return &projJSONFetchTextJSONConstInt64Op{ + op := 
&projJSONFetchTextJSONConstInt64Op{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } @@ -31260,10 +30295,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValPathJSONConstDatumOp{ + op := &projJSONFetchValPathJSONConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } @@ -31279,10 +30315,11 @@ func GetProjectionLConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextPathJSONConstDatumOp{ + op := &projJSONFetchTextPathJSONConstDatumOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } diff --git a/pkg/sql/colexec/colexecproj/proj_const_ops_tmpl.go b/pkg/sql/colexec/colexecproj/proj_const_ops_tmpl.go index 4b5f0ed9c76c..5def139fd856 100644 --- a/pkg/sql/colexec/colexecproj/proj_const_ops_tmpl.go +++ b/pkg/sql/colexec/colexecproj/proj_const_ops_tmpl.go @@ -22,7 +22,7 @@ package colexecproj import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -83,6 +83,9 @@ func _ASSIGN(_, _, _, _, _, _ interface{}) { type _OP_CONST_NAME struct { projConstOpBase + // {{if .NeedsBinaryOverloadHelper}} + execgen.BinaryOverloadHelper + // {{end}} // {{if _IS_CONST_LEFT}} constArg _L_GO_TYPE // {{else}} @@ -91,12 +94,12 @@ type _OP_CONST_NAME struct { } func (p _OP_CONST_NAME) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // {{if .NeedsBinaryOverloadHelper}} + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper + // {{end}} batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -166,7 +169,7 @@ func _SET_PROJECTION(_HAS_NULLS bool) { // If _HAS_NULLS is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
// {{if _HAS_NULLS}} - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) // {{end}} // {{end}} // {{end}} @@ -269,7 +272,6 @@ func GetProjection_CONST_SIDEConstOperator( allocator: allocator, colIdx: colIdx, outputIdx: outputIdx, - overloadHelper: execgen.OverloadHelper{BinFn: binFn, EvalCtx: evalCtx}, } c := colconv.GetDatumToPhysicalFn(constType)(constArg) // {{if _IS_CONST_LEFT}} @@ -296,7 +298,7 @@ func GetProjection_CONST_SIDEConstOperator( switch rightType.Width() { // {{range .RightWidths}} case _RIGHT_TYPE_WIDTH: - return &_OP_CONST_NAME{ + op := &_OP_CONST_NAME{ projConstOpBase: projConstOpBase, // {{if _IS_CONST_LEFT}} // {{if eq $leftFamilyStr "typeconv.DatumVecCanonicalTypeFamily"}} @@ -311,7 +313,11 @@ func GetProjection_CONST_SIDEConstOperator( constArg: c.(_R_GO_TYPE), // {{end}} // {{end}} - }, nil + } + // {{if .NeedsBinaryOverloadHelper}} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + // {{end}} + return op, nil // {{end}} } // {{end}} @@ -338,7 +344,6 @@ func GetProjection_CONST_SIDEConstOperator( case tree._NAME: switch typeconv.TypeFamilyToCanonicalTypeFamily(leftType.Family()) { // {{range .LeftFamilies}} - // {{$leftFamilyStr := .LeftCanonicalFamilyStr}} case _LEFT_CANONICAL_TYPE_FAMILY: switch leftType.Width() { // {{range .LeftWidths}} diff --git a/pkg/sql/colexec/colexecproj/proj_const_right_ops.eg.go b/pkg/sql/colexec/colexecproj/proj_const_right_ops.eg.go index 7bf40de86e6c..c8d9fc423b2f 100644 --- a/pkg/sql/colexec/colexecproj/proj_const_right_ops.eg.go +++ b/pkg/sql/colexec/colexecproj/proj_const_right_ops.eg.go @@ -15,7 +15,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -52,12 +52,6 @@ type projBitandInt16Int16ConstOp struct { } func (p projBitandInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -114,7 +108,7 @@ func (p projBitandInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -154,12 +148,6 @@ type projBitandInt16Int32ConstOp struct { } func (p projBitandInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -216,7 +204,7 @@ func (p projBitandInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -256,12 +244,6 @@ type projBitandInt16Int64ConstOp struct { } func (p projBitandInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -318,7 +300,7 @@ func (p projBitandInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -358,12 +340,6 @@ type projBitandInt32Int16ConstOp struct { } func (p projBitandInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -420,7 +396,7 @@ func (p projBitandInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -460,12 +436,6 @@ type projBitandInt32Int32ConstOp struct { } func (p projBitandInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -522,7 +492,7 @@ func (p projBitandInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -562,12 +532,6 @@ type projBitandInt32Int64ConstOp struct { } func (p projBitandInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -624,7 +588,7 @@ func (p projBitandInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -664,12 +628,6 @@ type projBitandInt64Int16ConstOp struct { } func (p projBitandInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -726,7 +684,7 @@ func (p projBitandInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -766,12 +724,6 @@ type projBitandInt64Int32ConstOp struct { } func (p projBitandInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -828,7 +780,7 @@ func (p projBitandInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -868,12 +820,6 @@ type projBitandInt64Int64ConstOp struct { } func (p projBitandInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -930,7 +876,7 @@ func (p projBitandInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -966,16 +912,15 @@ func (p projBitandInt64Int64ConstOp) Next() coldata.Batch { type projBitandDatumDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitandDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1045,7 +990,7 @@ func (p projBitandDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1098,12 +1043,6 @@ type projBitorInt16Int16ConstOp struct { } func (p projBitorInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1160,7 +1099,7 @@ func (p projBitorInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1200,12 +1139,6 @@ type projBitorInt16Int32ConstOp struct { } func (p projBitorInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1262,7 +1195,7 @@ func (p projBitorInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1302,12 +1235,6 @@ type projBitorInt16Int64ConstOp struct { } func (p projBitorInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1364,7 +1291,7 @@ func (p projBitorInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1404,12 +1331,6 @@ type projBitorInt32Int16ConstOp struct { } func (p projBitorInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1466,7 +1387,7 @@ func (p projBitorInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1506,12 +1427,6 @@ type projBitorInt32Int32ConstOp struct { } func (p projBitorInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1568,7 +1483,7 @@ func (p projBitorInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1608,12 +1523,6 @@ type projBitorInt32Int64ConstOp struct { } func (p projBitorInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1670,7 +1579,7 @@ func (p projBitorInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1710,12 +1619,6 @@ type projBitorInt64Int16ConstOp struct { } func (p projBitorInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1772,7 +1675,7 @@ func (p projBitorInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1812,12 +1715,6 @@ type projBitorInt64Int32ConstOp struct { } func (p projBitorInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1874,7 +1771,7 @@ func (p projBitorInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1914,12 +1811,6 @@ type projBitorInt64Int64ConstOp struct { } func (p projBitorInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1976,7 +1867,7 @@ func (p projBitorInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2012,16 +1903,15 @@ func (p projBitorInt64Int64ConstOp) Next() coldata.Batch { type projBitorDatumDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitorDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2091,7 +1981,7 @@ func (p projBitorDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2144,12 +2034,6 @@ type projBitxorInt16Int16ConstOp struct { } func (p projBitxorInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2206,7 +2090,7 @@ func (p projBitxorInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2246,12 +2130,6 @@ type projBitxorInt16Int32ConstOp struct { } func (p projBitxorInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2308,7 +2186,7 @@ func (p projBitxorInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2348,12 +2226,6 @@ type projBitxorInt16Int64ConstOp struct { } func (p projBitxorInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2410,7 +2282,7 @@ func (p projBitxorInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2450,12 +2322,6 @@ type projBitxorInt32Int16ConstOp struct { } func (p projBitxorInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2512,7 +2378,7 @@ func (p projBitxorInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2552,12 +2418,6 @@ type projBitxorInt32Int32ConstOp struct { } func (p projBitxorInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2614,7 +2474,7 @@ func (p projBitxorInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2654,12 +2514,6 @@ type projBitxorInt32Int64ConstOp struct { } func (p projBitxorInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2716,7 +2570,7 @@ func (p projBitxorInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2756,12 +2610,6 @@ type projBitxorInt64Int16ConstOp struct { } func (p projBitxorInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2818,7 +2666,7 @@ func (p projBitxorInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2858,12 +2706,6 @@ type projBitxorInt64Int32ConstOp struct { } func (p projBitxorInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2920,7 +2762,7 @@ func (p projBitxorInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2960,12 +2802,6 @@ type projBitxorInt64Int64ConstOp struct { } func (p projBitxorInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3022,7 +2858,7 @@ func (p projBitxorInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3058,16 +2894,15 @@ func (p projBitxorInt64Int64ConstOp) Next() coldata.Batch { type projBitxorDatumDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projBitxorDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3137,7 +2972,7 @@ func (p projBitxorDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3190,12 +3025,6 @@ type projPlusDecimalInt16ConstOp struct { } func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3231,9 +3060,9 @@ func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3251,9 +3080,9 @@ func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3266,7 +3095,7 @@ func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3275,9 +3104,9 @@ func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3292,9 +3121,9 @@ func (p projPlusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3320,12 +3149,6 @@ type projPlusDecimalInt32ConstOp struct { } func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3361,9 +3184,9 @@ func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3381,9 +3204,9 @@ func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3396,7 +3219,7 @@ func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3405,9 +3228,9 @@ func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3422,9 +3245,9 @@ func (p projPlusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3450,12 +3273,6 @@ type projPlusDecimalInt64ConstOp struct { } func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3491,9 +3308,9 @@ func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3511,9 +3328,9 @@ func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3526,7 +3343,7 @@ func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3535,9 +3352,9 @@ func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3552,9 +3369,9 @@ func (p projPlusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3580,12 +3397,6 @@ type projPlusDecimalDecimalConstOp struct { } func (p projPlusDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3654,7 +3465,7 @@ func (p projPlusDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3706,12 +3517,6 @@ type projPlusInt16Int16ConstOp struct { } func (p projPlusInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3780,7 +3585,7 @@ func (p projPlusInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3832,12 +3637,6 @@ type projPlusInt16Int32ConstOp struct { } func (p projPlusInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3906,7 +3705,7 @@ func (p projPlusInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3958,12 +3757,6 @@ type projPlusInt16Int64ConstOp struct { } func (p projPlusInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4032,7 +3825,7 @@ func (p projPlusInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4084,12 +3877,6 @@ type projPlusInt16DecimalConstOp struct { } func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4125,9 +3912,9 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4146,9 +3933,9 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4162,7 +3949,7 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4171,9 +3958,9 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4189,9 +3976,9 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4214,16 +4001,15 @@ func (p projPlusInt16DecimalConstOp) Next() coldata.Batch { type projPlusInt16DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusInt16DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4301,7 +4087,7 @@ func (p projPlusInt16DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4362,12 +4148,6 @@ type projPlusInt32Int16ConstOp struct { } func (p projPlusInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4436,7 +4216,7 @@ func (p projPlusInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4488,12 +4268,6 @@ type projPlusInt32Int32ConstOp struct { } func (p projPlusInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4562,7 +4336,7 @@ func (p projPlusInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4614,12 +4388,6 @@ type projPlusInt32Int64ConstOp struct { } func (p projPlusInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4688,7 +4456,7 @@ func (p projPlusInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4740,12 +4508,6 @@ type projPlusInt32DecimalConstOp struct { } func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4781,9 +4543,9 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4802,9 +4564,9 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4818,7 +4580,7 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4827,9 +4589,9 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4845,9 +4607,9 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -4870,16 +4632,15 @@ func (p projPlusInt32DecimalConstOp) Next() coldata.Batch { type projPlusInt32DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusInt32DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4957,7 +4718,7 @@ func (p projPlusInt32DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5018,12 +4779,6 @@ type projPlusInt64Int16ConstOp struct { } func (p projPlusInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5092,7 +4847,7 @@ func (p projPlusInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5144,12 +4899,6 @@ type projPlusInt64Int32ConstOp struct { } func (p projPlusInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5218,7 +4967,7 @@ func (p projPlusInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5270,12 +5019,6 @@ type projPlusInt64Int64ConstOp struct { } func (p projPlusInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5344,7 +5087,7 @@ func (p projPlusInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5396,12 +5139,6 @@ type projPlusInt64DecimalConstOp struct { } func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5437,9 +5174,9 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -5458,9 +5195,9 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -5474,7 +5211,7 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5483,9 +5220,9 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -5501,9 +5238,9 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -5526,16 +5263,15 @@ func (p projPlusInt64DecimalConstOp) Next() coldata.Batch { type projPlusInt64DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusInt64DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5613,7 +5349,7 @@ func (p projPlusInt64DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5674,12 +5410,6 @@ type projPlusFloat64Float64ConstOp struct { } func (p projPlusFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5742,7 +5472,7 @@ func (p projPlusFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5788,12 +5518,6 @@ type projPlusTimestampIntervalConstOp struct { } func (p projPlusTimestampIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5856,7 +5580,7 @@ func (p projPlusTimestampIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5902,12 +5626,6 @@ type projPlusIntervalTimestampConstOp struct { } func (p projPlusIntervalTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5970,7 +5688,7 @@ func (p projPlusIntervalTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6016,12 +5734,6 @@ type projPlusIntervalIntervalConstOp struct { } func (p projPlusIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6074,7 +5786,7 @@ func (p projPlusIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6106,16 +5818,15 @@ func (p projPlusIntervalIntervalConstOp) Next() coldata.Batch { type projPlusIntervalDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projPlusIntervalDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6193,7 +5904,7 @@ func (p projPlusIntervalDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6250,16 +5961,15 @@ func (p projPlusIntervalDatumConstOp) Next() coldata.Batch { type projPlusDatumIntervalConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg duration.Duration } func (p projPlusDatumIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6338,7 +6048,7 @@ func (p projPlusDatumIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6396,16 +6106,15 @@ func (p projPlusDatumIntervalConstOp) Next() coldata.Batch { type projPlusDatumInt16ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projPlusDatumInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6484,7 +6193,7 @@ func (p projPlusDatumInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6542,16 +6251,15 @@ func (p projPlusDatumInt16ConstOp) Next() coldata.Batch { type projPlusDatumInt32ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projPlusDatumInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6630,7 +6338,7 @@ func (p projPlusDatumInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6688,16 +6396,15 @@ func (p projPlusDatumInt32ConstOp) Next() coldata.Batch { type projPlusDatumInt64ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projPlusDatumInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6776,7 +6483,7 @@ func (p projPlusDatumInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6838,12 +6545,6 @@ type projMinusDecimalInt16ConstOp struct { } func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6879,9 +6580,9 @@ func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6899,9 +6600,9 @@ func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6914,7 +6615,7 @@ func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6923,9 +6624,9 @@ func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6940,9 +6641,9 @@ func (p projMinusDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -6968,12 +6669,6 @@ type projMinusDecimalInt32ConstOp struct { } func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7009,9 +6704,9 @@ func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7029,9 +6724,9 @@ func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7044,7 +6739,7 @@ func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7053,9 +6748,9 @@ func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7070,9 +6765,9 @@ func (p projMinusDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7098,12 +6793,6 @@ type projMinusDecimalInt64ConstOp struct { } func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7139,9 +6828,9 @@ func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7159,9 +6848,9 @@ func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7174,7 +6863,7 @@ func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7183,9 +6872,9 @@ func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7200,9 +6889,9 @@ func (p projMinusDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7228,12 +6917,6 @@ type projMinusDecimalDecimalConstOp struct { } func (p projMinusDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7302,7 +6985,7 @@ func (p projMinusDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7354,12 +7037,6 @@ type projMinusInt16Int16ConstOp struct { } func (p projMinusInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7428,7 +7105,7 @@ func (p projMinusInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7480,12 +7157,6 @@ type projMinusInt16Int32ConstOp struct { } func (p projMinusInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7554,7 +7225,7 @@ func (p projMinusInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7606,12 +7277,6 @@ type projMinusInt16Int64ConstOp struct { } func (p projMinusInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7680,7 +7345,7 @@ func (p projMinusInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7732,12 +7397,6 @@ type projMinusInt16DecimalConstOp struct { } func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7773,9 +7432,9 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -7794,9 +7453,9 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -7810,7 +7469,7 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
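Note on the scratch-decimal change running through these hunks: instead of borrowing `_overloadHelper.TmpDec1`, each templated body now declares a local `apd.Decimal`, with `//gcassert:noescape` asserting at build time that the value stays on the stack. A rough standalone illustration of the same arithmetic, using the cockroachdb/apd module directly (the precision and operand values are made up for the example; only the local-decimal pattern matches the generated code):

```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

func main() {
	ctx := apd.BaseContext.WithPrecision(20)
	lhs := apd.New(1234, -2) // 12.34

	// Previously: tmpDec := &_overloadHelper.TmpDec1 (a shared scratch field).
	// Now: a local value; in the generated code it carries //gcassert:noescape
	// so the build fails if the compiler decides it must heap-allocate.
	var tmpDec apd.Decimal
	tmpDec.SetInt64(5)

	var result apd.Decimal
	if _, err := ctx.Sub(&result, lhs, &tmpDec); err != nil {
		panic(err)
	}
	fmt.Println(result.String()) // 7.34
}
```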
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7819,9 +7478,9 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -7837,9 +7496,9 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -7862,16 +7521,15 @@ func (p projMinusInt16DecimalConstOp) Next() coldata.Batch { type projMinusInt16DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusInt16DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7949,7 +7607,7 @@ func (p projMinusInt16DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8010,12 +7668,6 @@ type projMinusInt32Int16ConstOp struct { } func (p projMinusInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8084,7 +7736,7 @@ func (p projMinusInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8136,12 +7788,6 @@ type projMinusInt32Int32ConstOp struct { } func (p projMinusInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8210,7 +7856,7 @@ func (p projMinusInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8262,12 +7908,6 @@ type projMinusInt32Int64ConstOp struct { } func (p projMinusInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8336,7 +7976,7 @@ func (p projMinusInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8388,12 +8028,6 @@ type projMinusInt32DecimalConstOp struct { } func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8429,9 +8063,9 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -8450,9 +8084,9 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -8466,7 +8100,7 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8475,9 +8109,9 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -8493,9 +8127,9 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -8518,16 +8152,15 @@ func (p projMinusInt32DecimalConstOp) Next() coldata.Batch { type projMinusInt32DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusInt32DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8605,7 +8238,7 @@ func (p projMinusInt32DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8666,12 +8299,6 @@ type projMinusInt64Int16ConstOp struct { } func (p projMinusInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8740,7 +8367,7 @@ func (p projMinusInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8792,12 +8419,6 @@ type projMinusInt64Int32ConstOp struct { } func (p projMinusInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8866,7 +8487,7 @@ func (p projMinusInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8918,12 +8539,6 @@ type projMinusInt64Int64ConstOp struct { } func (p projMinusInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8992,7 +8607,7 @@ func (p projMinusInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9044,12 +8659,6 @@ type projMinusInt64DecimalConstOp struct { } func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9085,9 +8694,9 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -9106,9 +8715,9 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -9122,7 +8731,7 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
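Note on the recurring `_outNulls.Or(colNulls)` to `_outNulls.Or(*colNulls)` change: the added dereference suggests that `colNulls` is now held as a pointer at that point in the generated code (or that `Or` now takes a value), so it must be dereferenced before being OR-ed into the output nulls. A toy sketch of why the dereference is needed, with a made-up `nulls` type standing in for `coldata.Nulls` (the real signatures differ):

```
package main

import "fmt"

// nulls is a tiny stand-in for coldata.Nulls: a bitmap of NULL rows.
type nulls struct{ isNull []bool }

// or has a value receiver and takes a value, so a *nulls obtained from the
// input vector must be dereferenced at the call site, as in
// _outNulls.Or(*colNulls).
func (n nulls) or(other nulls) nulls {
	out := nulls{isNull: make([]bool, len(n.isNull))}
	for i := range out.isNull {
		out.isNull[i] = n.isNull[i] || other.isNull[i]
	}
	return out
}

func main() {
	outNulls := nulls{isNull: []bool{false, true, false}}
	colNulls := &nulls{isNull: []bool{true, false, false}} // pointer, as handed out by the vector
	merged := outNulls.or(*colNulls)
	fmt.Println(merged.isNull) // [true true false]
}
```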
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9131,9 +8740,9 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -9149,9 +8758,9 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -9174,16 +8783,15 @@ func (p projMinusInt64DecimalConstOp) Next() coldata.Batch { type projMinusInt64DatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusInt64DatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9261,7 +8869,7 @@ func (p projMinusInt64DatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9322,12 +8930,6 @@ type projMinusFloat64Float64ConstOp struct { } func (p projMinusFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9390,7 +8992,7 @@ func (p projMinusFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9436,12 +9038,6 @@ type projMinusTimestampTimestampConstOp struct { } func (p projMinusTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9500,7 +9096,7 @@ func (p projMinusTimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9542,12 +9138,6 @@ type projMinusTimestampIntervalConstOp struct { } func (p projMinusTimestampIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9610,7 +9200,7 @@ func (p projMinusTimestampIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9656,12 +9246,6 @@ type projMinusIntervalIntervalConstOp struct { } func (p projMinusIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9714,7 +9298,7 @@ func (p projMinusIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9746,16 +9330,15 @@ func (p projMinusIntervalIntervalConstOp) Next() coldata.Batch { type projMinusIntervalDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusIntervalDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9833,7 +9416,7 @@ func (p projMinusIntervalDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9894,12 +9477,6 @@ type projMinusJSONBytesConstOp struct { } func (p projMinusJSONBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9969,7 +9546,7 @@ func (p projMinusJSONBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10022,12 +9599,6 @@ type projMinusJSONInt16ConstOp struct { } func (p projMinusJSONInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10090,7 +9661,7 @@ func (p projMinusJSONInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10136,12 +9707,6 @@ type projMinusJSONInt32ConstOp struct { } func (p projMinusJSONInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10204,7 +9769,7 @@ func (p projMinusJSONInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10250,12 +9815,6 @@ type projMinusJSONInt64ConstOp struct { } func (p projMinusJSONInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10318,7 +9877,7 @@ func (p projMinusJSONInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10360,16 +9919,15 @@ func (p projMinusJSONInt64ConstOp) Next() coldata.Batch { type projMinusDatumDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projMinusDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10439,7 +9997,7 @@ func (p projMinusDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10488,16 +10046,15 @@ func (p projMinusDatumDatumConstOp) Next() coldata.Batch { type projMinusDatumIntervalConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg duration.Duration } func (p projMinusDatumIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10576,7 +10133,7 @@ func (p projMinusDatumIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10634,16 +10191,15 @@ func (p projMinusDatumIntervalConstOp) Next() coldata.Batch { type projMinusDatumBytesConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg []byte } func (p projMinusDatumBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10721,7 +10277,7 @@ func (p projMinusDatumBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10778,16 +10334,15 @@ func (p projMinusDatumBytesConstOp) Next() coldata.Batch { type projMinusDatumInt16ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projMinusDatumInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10866,7 +10421,7 @@ func (p projMinusDatumInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10924,16 +10479,15 @@ func (p projMinusDatumInt16ConstOp) Next() coldata.Batch { type projMinusDatumInt32ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projMinusDatumInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11012,7 +10566,7 @@ func (p projMinusDatumInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11070,16 +10624,15 @@ func (p projMinusDatumInt32ConstOp) Next() coldata.Batch { type projMinusDatumInt64ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projMinusDatumInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11158,7 +10711,7 @@ func (p projMinusDatumInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11220,12 +10773,6 @@ type projMultDecimalInt16ConstOp struct { } func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11261,9 +10808,9 @@ func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11281,9 +10828,9 @@ func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11296,7 +10843,7 @@ func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11305,9 +10852,9 @@ func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11322,9 +10869,9 @@ func (p projMultDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11350,12 +10897,6 @@ type projMultDecimalInt32ConstOp struct { } func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11391,9 +10932,9 @@ func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11411,9 +10952,9 @@ func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11426,7 +10967,7 @@ func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11435,9 +10976,9 @@ func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11452,9 +10993,9 @@ func (p projMultDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11480,12 +11021,6 @@ type projMultDecimalInt64ConstOp struct { } func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11521,9 +11056,9 @@ func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11541,9 +11076,9 @@ func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11556,7 +11091,7 @@ func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11565,9 +11100,9 @@ func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11582,9 +11117,9 @@ func (p projMultDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -11610,12 +11145,6 @@ type projMultDecimalDecimalConstOp struct { } func (p projMultDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11684,7 +11213,7 @@ func (p projMultDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11736,12 +11265,6 @@ type projMultDecimalIntervalConstOp struct { } func (p projMultDecimalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11804,7 +11327,7 @@ func (p projMultDecimalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11850,12 +11373,6 @@ type projMultInt16Int16ConstOp struct { } func (p projMultInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11940,7 +11457,7 @@ func (p projMultInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12008,12 +11525,6 @@ type projMultInt16Int32ConstOp struct { } func (p projMultInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12098,7 +11609,7 @@ func (p projMultInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12166,12 +11677,6 @@ type projMultInt16Int64ConstOp struct { } func (p projMultInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12256,7 +11761,7 @@ func (p projMultInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12324,12 +11829,6 @@ type projMultInt16DecimalConstOp struct { } func (p projMultInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12365,9 +11864,9 @@ func (p projMultInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -12386,9 +11885,9 @@ func (p projMultInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -12402,7 +11901,7 @@ func (p projMultInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12411,9 +11910,9 @@ func (p projMultInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -12429,9 +11928,9 @@ func (p projMultInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -12458,12 +11957,6 @@ type projMultInt16IntervalConstOp struct { } func (p projMultInt16IntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12516,7 +12009,7 @@ func (p projMultInt16IntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12552,12 +12045,6 @@ type projMultInt32Int16ConstOp struct { } func (p projMultInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12642,7 +12129,7 @@ func (p projMultInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12710,12 +12197,6 @@ type projMultInt32Int32ConstOp struct { } func (p projMultInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12800,7 +12281,7 @@ func (p projMultInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12868,12 +12349,6 @@ type projMultInt32Int64ConstOp struct { } func (p projMultInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12958,7 +12433,7 @@ func (p projMultInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13026,12 +12501,6 @@ type projMultInt32DecimalConstOp struct { } func (p projMultInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13067,9 +12536,9 @@ func (p projMultInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13088,9 +12557,9 @@ func (p projMultInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13104,7 +12573,7 @@ func (p projMultInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13113,9 +12582,9 @@ func (p projMultInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13131,9 +12600,9 @@ func (p projMultInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13160,12 +12629,6 @@ type projMultInt32IntervalConstOp struct { } func (p projMultInt32IntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13218,7 +12681,7 @@ func (p projMultInt32IntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13254,12 +12717,6 @@ type projMultInt64Int16ConstOp struct { } func (p projMultInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13344,7 +12801,7 @@ func (p projMultInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13412,12 +12869,6 @@ type projMultInt64Int32ConstOp struct { } func (p projMultInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13502,7 +12953,7 @@ func (p projMultInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13570,12 +13021,6 @@ type projMultInt64Int64ConstOp struct { } func (p projMultInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13660,7 +13105,7 @@ func (p projMultInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13728,12 +13173,6 @@ type projMultInt64DecimalConstOp struct { } func (p projMultInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13769,9 +13208,9 @@ func (p projMultInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13790,9 +13229,9 @@ func (p projMultInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13806,7 +13245,7 @@ func (p projMultInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13815,9 +13254,9 @@ func (p projMultInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13833,9 +13272,9 @@ func (p projMultInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &p.constArg) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -13862,12 +13301,6 @@ type projMultInt64IntervalConstOp struct { } func (p projMultInt64IntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13920,7 +13353,7 @@ func (p projMultInt64IntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13956,12 +13389,6 @@ type projMultFloat64Float64ConstOp struct { } func (p projMultFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14024,7 +13451,7 @@ func (p projMultFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14070,12 +13497,6 @@ type projMultFloat64IntervalConstOp struct { } func (p projMultFloat64IntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14128,7 +13549,7 @@ func (p projMultFloat64IntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14164,12 +13585,6 @@ type projMultIntervalInt16ConstOp struct { } func (p projMultIntervalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14222,7 +13637,7 @@ func (p projMultIntervalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14258,12 +13673,6 @@ type projMultIntervalInt32ConstOp struct { } func (p projMultIntervalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14316,7 +13725,7 @@ func (p projMultIntervalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14352,12 +13761,6 @@ type projMultIntervalInt64ConstOp struct { } func (p projMultIntervalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14410,7 +13813,7 @@ func (p projMultIntervalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14446,12 +13849,6 @@ type projMultIntervalFloat64ConstOp struct { } func (p projMultIntervalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14504,7 +13901,7 @@ func (p projMultIntervalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14540,12 +13937,6 @@ type projMultIntervalDecimalConstOp struct { } func (p projMultIntervalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14608,7 +13999,7 @@ func (p projMultIntervalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14654,12 +14045,6 @@ type projDivDecimalInt16ConstOp struct { } func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14699,9 +14084,9 @@ func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14723,9 +14108,9 @@ func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14738,7 +14123,7 @@ func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14751,9 +14136,9 @@ func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14772,9 +14157,9 @@ func (p projDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14800,12 +14185,6 @@ type projDivDecimalInt32ConstOp struct { } func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14845,9 +14224,9 @@ func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14869,9 +14248,9 @@ func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14884,7 +14263,7 @@ func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14897,9 +14276,9 @@ func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14918,9 +14297,9 @@ func (p projDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -14946,12 +14325,6 @@ type projDivDecimalInt64ConstOp struct { } func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14991,9 +14364,9 @@ func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15015,9 +14388,9 @@ func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15030,7 +14403,7 @@ func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15043,9 +14416,9 @@ func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15064,9 +14437,9 @@ func (p projDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15092,12 +14465,6 @@ type projDivDecimalDecimalConstOp struct { } func (p projDivDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15174,7 +14541,7 @@ func (p projDivDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15234,12 +14601,6 @@ type projDivInt16Int16ConstOp struct { } func (p projDivInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15277,10 +14638,10 @@ func (p projDivInt16Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15300,10 +14661,10 @@ func (p projDivInt16Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15316,7 +14677,7 @@ func (p projDivInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15327,10 +14688,10 @@ func (p projDivInt16Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15347,10 +14708,10 @@ func (p projDivInt16Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15376,12 +14737,6 @@ type projDivInt16Int32ConstOp struct { } func (p projDivInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15419,10 +14774,10 @@ func (p projDivInt16Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15442,10 +14797,10 @@ func (p projDivInt16Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15458,7 +14813,7 @@ func (p projDivInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15469,10 +14824,10 @@ func (p projDivInt16Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15489,10 +14844,10 @@ func (p projDivInt16Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15518,12 +14873,6 @@ type projDivInt16Int64ConstOp struct { } func (p projDivInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15561,10 +14910,10 @@ func (p projDivInt16Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15584,10 +14933,10 @@ func (p projDivInt16Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15600,7 +14949,7 @@ func (p projDivInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15611,10 +14960,10 @@ func (p projDivInt16Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15631,10 +14980,10 @@ func (p projDivInt16Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15660,12 +15009,6 @@ type projDivInt16DecimalConstOp struct { } func (p projDivInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15705,9 +15048,9 @@ func (p projDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -15730,9 +15073,9 @@ func (p projDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -15746,7 +15089,7 @@ func (p projDivInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15759,9 +15102,9 @@ func (p projDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -15781,9 +15124,9 @@ func (p projDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -15810,12 +15153,6 @@ type projDivInt32Int16ConstOp struct { } func (p projDivInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15853,10 +15190,10 @@ func (p projDivInt32Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15876,10 +15213,10 @@ func (p projDivInt32Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15892,7 +15229,7 @@ func (p projDivInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15903,10 +15240,10 @@ func (p projDivInt32Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15923,10 +15260,10 @@ func (p projDivInt32Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15952,12 +15289,6 @@ type projDivInt32Int32ConstOp struct { } func (p projDivInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15995,10 +15326,10 @@ func (p projDivInt32Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16018,10 +15349,10 @@ func (p projDivInt32Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16034,7 +15365,7 @@ func (p projDivInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16045,10 +15376,10 @@ func (p projDivInt32Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16065,10 +15396,10 @@ func (p projDivInt32Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16094,12 +15425,6 @@ type projDivInt32Int64ConstOp struct { } func (p projDivInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16137,10 +15462,10 @@ func (p projDivInt32Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16160,10 +15485,10 @@ func (p projDivInt32Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16176,7 +15501,7 @@ func (p projDivInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16187,10 +15512,10 @@ func (p projDivInt32Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16207,10 +15532,10 @@ func (p projDivInt32Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16236,12 +15561,6 @@ type projDivInt32DecimalConstOp struct { } func (p projDivInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16281,9 +15600,9 @@ func (p projDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16306,9 +15625,9 @@ func (p projDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16322,7 +15641,7 @@ func (p projDivInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16335,9 +15654,9 @@ func (p projDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16357,9 +15676,9 @@ func (p projDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16386,12 +15705,6 @@ type projDivInt64Int16ConstOp struct { } func (p projDivInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16429,10 +15742,10 @@ func (p projDivInt64Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16452,10 +15765,10 @@ func (p projDivInt64Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16468,7 +15781,7 @@ func (p projDivInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16479,10 +15792,10 @@ func (p projDivInt64Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16499,10 +15812,10 @@ func (p projDivInt64Int16ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16528,12 +15841,6 @@ type projDivInt64Int32ConstOp struct { } func (p projDivInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16571,10 +15878,10 @@ func (p projDivInt64Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16594,10 +15901,10 @@ func (p projDivInt64Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16610,7 +15917,7 @@ func (p projDivInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16621,10 +15928,10 @@ func (p projDivInt64Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16641,10 +15948,10 @@ func (p projDivInt64Int32ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16670,12 +15977,6 @@ type projDivInt64Int64ConstOp struct { } func (p projDivInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16713,10 +16014,10 @@ func (p projDivInt64Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16736,10 +16037,10 @@ func (p projDivInt64Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16752,7 +16053,7 @@ func (p projDivInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16763,10 +16064,10 @@ func (p projDivInt64Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16783,10 +16084,10 @@ func (p projDivInt64Int64ConstOp) Next() coldata.Batch { if int64(p.constArg) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16812,12 +16113,6 @@ type projDivInt64DecimalConstOp struct { } func (p projDivInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16857,9 +16152,9 @@ func (p projDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16882,9 +16177,9 @@ func (p projDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16898,7 +16193,7 @@ func (p projDivInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16911,9 +16206,9 @@ func (p projDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16933,9 +16228,9 @@ func (p projDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -16962,12 +16257,6 @@ type projDivFloat64Float64ConstOp struct { } func (p projDivFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17038,7 +16327,7 @@ func (p projDivFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17092,12 +16381,6 @@ type projDivIntervalInt64ConstOp struct { } func (p projDivIntervalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17158,7 +16441,7 @@ func (p projDivIntervalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17202,12 +16485,6 @@ type projDivIntervalFloat64ConstOp struct { } func (p projDivIntervalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17268,7 +16545,7 @@ func (p projDivIntervalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17312,12 +16589,6 @@ type projFloorDivDecimalInt16ConstOp struct { } func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17357,9 +16628,9 @@ func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17381,9 +16652,9 @@ func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17396,7 +16667,7 @@ func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17409,9 +16680,9 @@ func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17430,9 +16701,9 @@ func (p projFloorDivDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17458,12 +16729,6 @@ type projFloorDivDecimalInt32ConstOp struct { } func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17503,9 +16768,9 @@ func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17527,9 +16792,9 @@ func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17542,7 +16807,7 @@ func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17555,9 +16820,9 @@ func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17576,9 +16841,9 @@ func (p projFloorDivDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17604,12 +16869,6 @@ type projFloorDivDecimalInt64ConstOp struct { } func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17649,9 +16908,9 @@ func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17673,9 +16932,9 @@ func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17688,7 +16947,7 @@ func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17701,9 +16960,9 @@ func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17722,9 +16981,9 @@ func (p projFloorDivDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17750,12 +17009,6 @@ type projFloorDivDecimalDecimalConstOp struct { } func (p projFloorDivDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17832,7 +17085,7 @@ func (p projFloorDivDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17892,12 +17145,6 @@ type projFloorDivInt16Int16ConstOp struct { } func (p projFloorDivInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17964,7 +17211,7 @@ func (p projFloorDivInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18014,12 +17261,6 @@ type projFloorDivInt16Int32ConstOp struct { } func (p projFloorDivInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18086,7 +17327,7 @@ func (p projFloorDivInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18136,12 +17377,6 @@ type projFloorDivInt16Int64ConstOp struct { } func (p projFloorDivInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18208,7 +17443,7 @@ func (p projFloorDivInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18258,12 +17493,6 @@ type projFloorDivInt16DecimalConstOp struct { } func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18303,9 +17532,9 @@ func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18328,9 +17557,9 @@ func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18344,7 +17573,7 @@ func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18357,9 +17586,9 @@ func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18379,9 +17608,9 @@ func (p projFloorDivInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18408,12 +17637,6 @@ type projFloorDivInt32Int16ConstOp struct { } func (p projFloorDivInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18480,7 +17703,7 @@ func (p projFloorDivInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18530,12 +17753,6 @@ type projFloorDivInt32Int32ConstOp struct { } func (p projFloorDivInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18602,7 +17819,7 @@ func (p projFloorDivInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18652,12 +17869,6 @@ type projFloorDivInt32Int64ConstOp struct { } func (p projFloorDivInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18724,7 +17935,7 @@ func (p projFloorDivInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18774,12 +17985,6 @@ type projFloorDivInt32DecimalConstOp struct { } func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18819,9 +18024,9 @@ func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18844,9 +18049,9 @@ func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18860,7 +18065,7 @@ func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18873,9 +18078,9 @@ func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18895,9 +18100,9 @@ func (p projFloorDivInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -18924,12 +18129,6 @@ type projFloorDivInt64Int16ConstOp struct { } func (p projFloorDivInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18996,7 +18195,7 @@ func (p projFloorDivInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19046,12 +18245,6 @@ type projFloorDivInt64Int32ConstOp struct { } func (p projFloorDivInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19118,7 +18311,7 @@ func (p projFloorDivInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19168,12 +18361,6 @@ type projFloorDivInt64Int64ConstOp struct { } func (p projFloorDivInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19240,7 +18427,7 @@ func (p projFloorDivInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19290,12 +18477,6 @@ type projFloorDivInt64DecimalConstOp struct { } func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19335,9 +18516,9 @@ func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -19360,9 +18541,9 @@ func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -19376,7 +18557,7 @@ func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19389,9 +18570,9 @@ func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -19411,9 +18592,9 @@ func (p projFloorDivInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -19440,12 +18621,6 @@ type projFloorDivFloat64Float64ConstOp struct { } func (p projFloorDivFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19516,7 +18691,7 @@ func (p projFloorDivFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19570,12 +18745,6 @@ type projModDecimalInt16ConstOp struct { } func (p projModDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19615,9 +18784,9 @@ func (p projModDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19639,9 +18808,9 @@ func (p projModDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19654,7 +18823,7 @@ func (p projModDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19667,9 +18836,9 @@ func (p projModDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19688,9 +18857,9 @@ func (p projModDecimalInt16ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19716,12 +18885,6 @@ type projModDecimalInt32ConstOp struct { } func (p projModDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19761,9 +18924,9 @@ func (p projModDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19785,9 +18948,9 @@ func (p projModDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19800,7 +18963,7 @@ func (p projModDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19813,9 +18976,9 @@ func (p projModDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19834,9 +18997,9 @@ func (p projModDecimalInt32ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19862,12 +19025,6 @@ type projModDecimalInt64ConstOp struct { } func (p projModDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19907,9 +19064,9 @@ func (p projModDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19931,9 +19088,9 @@ func (p projModDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19946,7 +19103,7 @@ func (p projModDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19959,9 +19116,9 @@ func (p projModDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -19980,9 +19137,9 @@ func (p projModDecimalInt64ConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20008,12 +19165,6 @@ type projModDecimalDecimalConstOp struct { } func (p projModDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20090,7 +19241,7 @@ func (p projModDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20150,12 +19301,6 @@ type projModInt16Int16ConstOp struct { } func (p projModInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20222,7 +19367,7 @@ func (p projModInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20272,12 +19417,6 @@ type projModInt16Int32ConstOp struct { } func (p projModInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20344,7 +19483,7 @@ func (p projModInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20394,12 +19533,6 @@ type projModInt16Int64ConstOp struct { } func (p projModInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20466,7 +19599,7 @@ func (p projModInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20516,12 +19649,6 @@ type projModInt16DecimalConstOp struct { } func (p projModInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20561,9 +19688,9 @@ func (p projModInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -20586,9 +19713,9 @@ func (p projModInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -20602,7 +19729,7 @@ func (p projModInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20615,9 +19742,9 @@ func (p projModInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -20637,9 +19764,9 @@ func (p projModInt16DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -20666,12 +19793,6 @@ type projModInt32Int16ConstOp struct { } func (p projModInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20738,7 +19859,7 @@ func (p projModInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20788,12 +19909,6 @@ type projModInt32Int32ConstOp struct { } func (p projModInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20860,7 +19975,7 @@ func (p projModInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20910,12 +20025,6 @@ type projModInt32Int64ConstOp struct { } func (p projModInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20982,7 +20091,7 @@ func (p projModInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21032,12 +20141,6 @@ type projModInt32DecimalConstOp struct { } func (p projModInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21077,9 +20180,9 @@ func (p projModInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21102,9 +20205,9 @@ func (p projModInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21118,7 +20221,7 @@ func (p projModInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21131,9 +20234,9 @@ func (p projModInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21153,9 +20256,9 @@ func (p projModInt32DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21182,12 +20285,6 @@ type projModInt64Int16ConstOp struct { } func (p projModInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21254,7 +20351,7 @@ func (p projModInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21304,12 +20401,6 @@ type projModInt64Int32ConstOp struct { } func (p projModInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21376,7 +20467,7 @@ func (p projModInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21426,12 +20517,6 @@ type projModInt64Int64ConstOp struct { } func (p projModInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21498,7 +20583,7 @@ func (p projModInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21548,12 +20633,6 @@ type projModInt64DecimalConstOp struct { } func (p projModInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21593,9 +20672,9 @@ func (p projModInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21618,9 +20697,9 @@ func (p projModInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21634,7 +20713,7 @@ func (p projModInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21647,9 +20726,9 @@ func (p projModInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21669,9 +20748,9 @@ func (p projModInt64DecimalConstOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &p.constArg) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -21698,12 +20777,6 @@ type projModFloat64Float64ConstOp struct { } func (p projModFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21774,7 +20847,7 @@ func (p projModFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21828,12 +20901,6 @@ type projPowDecimalInt16ConstOp struct { } func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21869,9 +20936,9 @@ func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21889,9 +20956,9 @@ func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21904,7 +20971,7 @@ func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21913,9 +20980,9 @@ func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21930,9 +20997,9 @@ func (p projPowDecimalInt16ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21958,12 +21025,6 @@ type projPowDecimalInt32ConstOp struct { } func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21999,9 +21060,9 @@ func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22019,9 +21080,9 @@ func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22034,7 +21095,7 @@ func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22043,9 +21104,9 @@ func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22060,9 +21121,9 @@ func (p projPowDecimalInt32ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22088,12 +21149,6 @@ type projPowDecimalInt64ConstOp struct { } func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22129,9 +21184,9 @@ func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22149,9 +21204,9 @@ func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22164,7 +21219,7 @@ func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22173,9 +21228,9 @@ func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22190,9 +21245,9 @@ func (p projPowDecimalInt64ConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -22218,12 +21273,6 @@ type projPowDecimalDecimalConstOp struct { } func (p projPowDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22292,7 +21341,7 @@ func (p projPowDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22344,12 +21393,6 @@ type projPowInt16Int16ConstOp struct { } func (p projPowInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22384,10 +21427,10 @@ func (p projPowInt16Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22409,10 +21452,10 @@ func (p projPowInt16Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22430,7 +21473,7 @@ func (p projPowInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22438,10 +21481,10 @@ func (p projPowInt16Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22460,10 +21503,10 @@ func (p projPowInt16Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22494,12 +21537,6 @@ type projPowInt16Int32ConstOp struct { } func (p projPowInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
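For the integer-to-integer `Pow` operators the same idea needs two locals: both operands are converted to decimals, the power is evaluated, and the result is converted back with `Int64`, which errors if the value is fractional or out of range. A hedged sketch under the same assumptions (the `powInts` helper and context are illustrative, not `tree.DecimalCtx`):

```go
// Sketch of the int ^ int pattern used by the projPowInt*Int*Const operators:
// two stack-local decimals replace _overloadHelper.TmpDec1/TmpDec2.
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

func powInts(ctx *apd.Context, base, exp int64) (int64, error) {
	var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape
	leftTmpDec.SetInt64(base)
	rightTmpDec.SetInt64(exp)
	if _, err := ctx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil {
		return 0, err
	}
	// Int64 fails if the result does not fit in an int64, which is how the
	// generated operators surface overflow.
	return leftTmpDec.Int64()
}

func main() {
	ctx := apd.BaseContext.WithPrecision(25)
	res, err := powInts(ctx, 2, 10)
	if err != nil {
		panic(err)
	}
	fmt.Println(res) // 1024
}
```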
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22534,10 +21571,10 @@ func (p projPowInt16Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22559,10 +21596,10 @@ func (p projPowInt16Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22580,7 +21617,7 @@ func (p projPowInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22588,10 +21625,10 @@ func (p projPowInt16Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22610,10 +21647,10 @@ func (p projPowInt16Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22644,12 +21681,6 @@ type projPowInt16Int64ConstOp struct { } func (p projPowInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22684,10 +21715,10 @@ func (p projPowInt16Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22709,10 +21740,10 @@ func (p projPowInt16Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22730,7 +21761,7 @@ func (p projPowInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22738,10 +21769,10 @@ func (p projPowInt16Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22760,10 +21791,10 @@ func (p projPowInt16Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22794,12 +21825,6 @@ type projPowInt16DecimalConstOp struct { } func (p projPowInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22835,9 +21860,9 @@ func (p projPowInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -22856,9 +21881,9 @@ func (p projPowInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -22872,7 +21897,7 @@ func (p projPowInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22881,9 +21906,9 @@ func (p projPowInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -22899,9 +21924,9 @@ func (p projPowInt16DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -22928,12 +21953,6 @@ type projPowInt32Int16ConstOp struct { } func (p projPowInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22968,10 +21987,10 @@ func (p projPowInt32Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -22993,10 +22012,10 @@ func (p projPowInt32Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23014,7 +22033,7 @@ func (p projPowInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23022,10 +22041,10 @@ func (p projPowInt32Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23044,10 +22063,10 @@ func (p projPowInt32Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23078,12 +22097,6 @@ type projPowInt32Int32ConstOp struct { } func (p projPowInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23118,10 +22131,10 @@ func (p projPowInt32Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23143,10 +22156,10 @@ func (p projPowInt32Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23164,7 +22177,7 @@ func (p projPowInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23172,10 +22185,10 @@ func (p projPowInt32Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23194,10 +22207,10 @@ func (p projPowInt32Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23228,12 +22241,6 @@ type projPowInt32Int64ConstOp struct { } func (p projPowInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23268,10 +22275,10 @@ func (p projPowInt32Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23293,10 +22300,10 @@ func (p projPowInt32Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23314,7 +22321,7 @@ func (p projPowInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23322,10 +22329,10 @@ func (p projPowInt32Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23344,10 +22351,10 @@ func (p projPowInt32Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23378,12 +22385,6 @@ type projPowInt32DecimalConstOp struct { } func (p projPowInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23419,9 +22420,9 @@ func (p projPowInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -23440,9 +22441,9 @@ func (p projPowInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -23456,7 +22457,7 @@ func (p projPowInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23465,9 +22466,9 @@ func (p projPowInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -23483,9 +22484,9 @@ func (p projPowInt32DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -23512,12 +22513,6 @@ type projPowInt64Int16ConstOp struct { } func (p projPowInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23552,10 +22547,10 @@ func (p projPowInt64Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23577,10 +22572,10 @@ func (p projPowInt64Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23598,7 +22593,7 @@ func (p projPowInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23606,10 +22601,10 @@ func (p projPowInt64Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23628,10 +22623,10 @@ func (p projPowInt64Int16ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23662,12 +22657,6 @@ type projPowInt64Int32ConstOp struct { } func (p projPowInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23702,10 +22691,10 @@ func (p projPowInt64Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23727,10 +22716,10 @@ func (p projPowInt64Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23748,7 +22737,7 @@ func (p projPowInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23756,10 +22745,10 @@ func (p projPowInt64Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23778,10 +22767,10 @@ func (p projPowInt64Int32ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23812,12 +22801,6 @@ type projPowInt64Int64ConstOp struct { } func (p projPowInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23852,10 +22835,10 @@ func (p projPowInt64Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23877,10 +22860,10 @@ func (p projPowInt64Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23898,7 +22881,7 @@ func (p projPowInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23906,10 +22889,10 @@ func (p projPowInt64Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23928,10 +22911,10 @@ func (p projPowInt64Int64ConstOp) Next() coldata.Batch { arg := col.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg))) rightTmpDec.SetInt64(int64(int64(p.constArg))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23962,12 +22945,6 @@ type projPowInt64DecimalConstOp struct { } func (p projPowInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24003,9 +22980,9 @@ func (p projPowInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -24024,9 +23001,9 @@ func (p projPowInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -24040,7 +23017,7 @@ func (p projPowInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24049,9 +23026,9 @@ func (p projPowInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -24067,9 +23044,9 @@ func (p projPowInt64DecimalConstOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &p.constArg) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &p.constArg) if err != nil { colexecerror.ExpectedError(err) } @@ -24096,12 +23073,6 @@ type projPowFloat64Float64ConstOp struct { } func (p projPowFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24164,7 +23135,7 @@ func (p projPowFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24210,12 +23181,6 @@ type projConcatBytesBytesConstOp struct { } func (p projConcatBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24281,7 +23246,7 @@ func (p projConcatBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24330,12 +23295,6 @@ type projConcatJSONJSONConstOp struct { } func (p projConcatJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24399,7 +23358,7 @@ func (p projConcatJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24442,16 +23401,15 @@ func (p projConcatJSONJSONConstOp) Next() coldata.Batch { type projConcatDatumDatumConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg interface{} } func (p projConcatDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24521,7 +23479,7 @@ func (p projConcatDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24574,12 +23532,6 @@ type projLShiftInt16Int16ConstOp struct { } func (p projLShiftInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24648,7 +23600,7 @@ func (p projLShiftInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24700,12 +23652,6 @@ type projLShiftInt16Int32ConstOp struct { } func (p projLShiftInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24774,7 +23720,7 @@ func (p projLShiftInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24826,12 +23772,6 @@ type projLShiftInt16Int64ConstOp struct { } func (p projLShiftInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24900,7 +23840,7 @@ func (p projLShiftInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24952,12 +23892,6 @@ type projLShiftInt32Int16ConstOp struct { } func (p projLShiftInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25026,7 +23960,7 @@ func (p projLShiftInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25078,12 +24012,6 @@ type projLShiftInt32Int32ConstOp struct { } func (p projLShiftInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25152,7 +24080,7 @@ func (p projLShiftInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25204,12 +24132,6 @@ type projLShiftInt32Int64ConstOp struct { } func (p projLShiftInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25278,7 +24200,7 @@ func (p projLShiftInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25330,12 +24252,6 @@ type projLShiftInt64Int16ConstOp struct { } func (p projLShiftInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25404,7 +24320,7 @@ func (p projLShiftInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25456,12 +24372,6 @@ type projLShiftInt64Int32ConstOp struct { } func (p projLShiftInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25530,7 +24440,7 @@ func (p projLShiftInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25582,12 +24492,6 @@ type projLShiftInt64Int64ConstOp struct { } func (p projLShiftInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25656,7 +24560,7 @@ func (p projLShiftInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25704,16 +24608,15 @@ func (p projLShiftInt64Int64ConstOp) Next() coldata.Batch { type projLShiftDatumInt16ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projLShiftDatumInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25792,7 +24695,7 @@ func (p projLShiftDatumInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25850,16 +24753,15 @@ func (p projLShiftDatumInt16ConstOp) Next() coldata.Batch { type projLShiftDatumInt32ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projLShiftDatumInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25938,7 +24840,7 @@ func (p projLShiftDatumInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25996,16 +24898,15 @@ func (p projLShiftDatumInt32ConstOp) Next() coldata.Batch { type projLShiftDatumInt64ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projLShiftDatumInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26084,7 +24985,7 @@ func (p projLShiftDatumInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26146,12 +25047,6 @@ type projRShiftInt16Int16ConstOp struct { } func (p projRShiftInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26220,7 +25115,7 @@ func (p projRShiftInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26272,12 +25167,6 @@ type projRShiftInt16Int32ConstOp struct { } func (p projRShiftInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26346,7 +25235,7 @@ func (p projRShiftInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26398,12 +25287,6 @@ type projRShiftInt16Int64ConstOp struct { } func (p projRShiftInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26472,7 +25355,7 @@ func (p projRShiftInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26524,12 +25407,6 @@ type projRShiftInt32Int16ConstOp struct { } func (p projRShiftInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26598,7 +25475,7 @@ func (p projRShiftInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26650,12 +25527,6 @@ type projRShiftInt32Int32ConstOp struct { } func (p projRShiftInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26724,7 +25595,7 @@ func (p projRShiftInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26776,12 +25647,6 @@ type projRShiftInt32Int64ConstOp struct { } func (p projRShiftInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26850,7 +25715,7 @@ func (p projRShiftInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26902,12 +25767,6 @@ type projRShiftInt64Int16ConstOp struct { } func (p projRShiftInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26976,7 +25835,7 @@ func (p projRShiftInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27028,12 +25887,6 @@ type projRShiftInt64Int32ConstOp struct { } func (p projRShiftInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27102,7 +25955,7 @@ func (p projRShiftInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27154,12 +26007,6 @@ type projRShiftInt64Int64ConstOp struct { } func (p projRShiftInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27228,7 +26075,7 @@ func (p projRShiftInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27276,16 +26123,15 @@ func (p projRShiftInt64Int64ConstOp) Next() coldata.Batch { type projRShiftDatumInt16ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int16 } func (p projRShiftDatumInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27364,7 +26210,7 @@ func (p projRShiftDatumInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27422,16 +26268,15 @@ func (p projRShiftDatumInt16ConstOp) Next() coldata.Batch { type projRShiftDatumInt32ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int32 } func (p projRShiftDatumInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27510,7 +26355,7 @@ func (p projRShiftDatumInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27568,16 +26413,15 @@ func (p projRShiftDatumInt32ConstOp) Next() coldata.Batch { type projRShiftDatumInt64ConstOp struct { projConstOpBase + execgen.BinaryOverloadHelper constArg int64 } func (p projRShiftDatumInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27656,7 +26500,7 @@ func (p projRShiftDatumInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27718,12 +26562,6 @@ type projJSONFetchValJSONBytesConstOp struct { } func (p projJSONFetchValJSONBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27799,7 +26637,7 @@ func (p projJSONFetchValJSONBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27858,12 +26696,6 @@ type projJSONFetchValJSONInt16ConstOp struct { } func (p projJSONFetchValJSONInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27934,7 +26766,7 @@ func (p projJSONFetchValJSONInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27988,12 +26820,6 @@ type projJSONFetchValJSONInt32ConstOp struct { } func (p projJSONFetchValJSONInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28064,7 +26890,7 @@ func (p projJSONFetchValJSONInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
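Only the datum-backed operators keep an overload helper after this change: they embed `execgen.BinaryOverloadHelper` in the operator struct and copy it into the `_overloadHelper` local at the top of `Next`, while every other operator drops the field together with the old `_ = _overloadHelper` "unused" workaround. A rough sketch of the embedding pattern, using hypothetical stand-in types since the real helper's fields are not shown in this diff:

```
package main

import "fmt"

// Hypothetical stand-ins for execgen.BinaryOverloadHelper and projConstOpBase,
// used only to illustrate the embedding pattern visible in the hunks above.
type BinaryOverloadHelper struct {
	// The real helper carries what the datum-backed binary overloads need
	// (binary function, eval context); a name suffices for the sketch.
	Name string
}

type projConstOpBase struct{}

// Datum-handling operators embed the helper directly in the struct.
type projConcatDatumDatumConstOp struct {
	projConstOpBase
	BinaryOverloadHelper
	constArg interface{}
}

func (p projConcatDatumDatumConstOp) Next() {
	// The templated code still expects a local named _overloadHelper, so it
	// is copied out of the embedded field at the top of Next.
	_overloadHelper := p.BinaryOverloadHelper
	fmt.Println(_overloadHelper.Name)
}

func main() {
	op := projConcatDatumDatumConstOp{
		BinaryOverloadHelper: BinaryOverloadHelper{Name: "concat"},
	}
	op.Next()
}
```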
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28118,12 +26944,6 @@ type projJSONFetchValJSONInt64ConstOp struct { } func (p projJSONFetchValJSONInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28194,7 +27014,7 @@ func (p projJSONFetchValJSONInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28248,12 +27068,6 @@ type projJSONFetchTextJSONBytesConstOp struct { } func (p projJSONFetchTextJSONBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28347,7 +27161,7 @@ func (p projJSONFetchTextJSONBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28424,12 +27238,6 @@ type projJSONFetchTextJSONInt16ConstOp struct { } func (p projJSONFetchTextJSONInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28518,7 +27326,7 @@ func (p projJSONFetchTextJSONInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28590,12 +27398,6 @@ type projJSONFetchTextJSONInt32ConstOp struct { } func (p projJSONFetchTextJSONInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28684,7 +27486,7 @@ func (p projJSONFetchTextJSONInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28756,12 +27558,6 @@ type projJSONFetchTextJSONInt64ConstOp struct { } func (p projJSONFetchTextJSONInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28850,7 +27646,7 @@ func (p projJSONFetchTextJSONInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28922,12 +27718,6 @@ type projJSONFetchValPathJSONDatumConstOp struct { } func (p projJSONFetchValPathJSONDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28997,7 +27787,7 @@ func (p projJSONFetchValPathJSONDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29050,12 +27840,6 @@ type projJSONFetchTextPathJSONDatumConstOp struct { } func (p projJSONFetchTextPathJSONDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29145,7 +27929,7 @@ func (p projJSONFetchTextPathJSONDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29218,12 +28002,6 @@ type projEQBoolBoolConstOp struct { } func (p projEQBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29304,7 +28082,7 @@ func (p projEQBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29368,12 +28146,6 @@ type projEQBytesBytesConstOp struct { } func (p projEQBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29437,7 +28209,7 @@ func (p projEQBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29484,12 +28256,6 @@ type projEQDecimalInt16ConstOp struct { } func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29527,9 +28293,9 @@ func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29550,9 +28316,9 @@ func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29566,7 +28332,7 @@ func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
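Every operator also changes `_outNulls.Or(colNulls)` to `_outNulls.Or(*colNulls)`: the column's null bitmap is now held as a pointer and dereferenced when unioned into the output nulls. A toy illustration with hypothetical types; the actual `coldata.Nulls` bitmap and its method signatures differ:

```
package main

import "fmt"

// Nulls is a minimal stand-in for a null bitmap, only to show why the
// generated code dereferences colNulls before the union: the column bitmap is
// a pointer, while Or still combines bitmaps by value.
type Nulls struct{ nulls []bool }

// Or returns the union of two bitmaps, mirroring the by-value call
// _outNulls.Or(*colNulls) in the diff.
func (n Nulls) Or(other Nulls) Nulls {
	out := Nulls{nulls: make([]bool, len(n.nulls))}
	for i := range n.nulls {
		out.nulls[i] = n.nulls[i] || other.nulls[i]
	}
	return out
}

func main() {
	outNulls := Nulls{nulls: []bool{true, false, false}}
	colNulls := &Nulls{nulls: []bool{false, false, true}} // pointer, as the column bitmap now is
	merged := outNulls.Or(*colNulls)                      // dereference before the union
	fmt.Println(merged.nulls)                             // [true false true]
}
```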
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29577,9 +28343,9 @@ func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29597,9 +28363,9 @@ func (p projEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29626,12 +28392,6 @@ type projEQDecimalInt32ConstOp struct { } func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29669,9 +28429,9 @@ func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29692,9 +28452,9 @@ func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29708,7 +28468,7 @@ func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29719,9 +28479,9 @@ func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29739,9 +28499,9 @@ func (p projEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29768,12 +28528,6 @@ type projEQDecimalInt64ConstOp struct { } func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29811,9 +28565,9 @@ func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29834,9 +28588,9 @@ func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29850,7 +28604,7 @@ func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29861,9 +28615,9 @@ func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29881,9 +28635,9 @@ func (p projEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29910,12 +28664,6 @@ type projEQDecimalFloat64ConstOp struct { } func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29953,11 +28701,11 @@ func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29978,11 +28726,11 @@ func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -29996,7 +28744,7 @@ func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30007,11 +28755,11 @@ func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -30029,11 +28777,11 @@ func (p projEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult == 0 @@ -30060,12 +28808,6 @@ type projEQDecimalDecimalConstOp struct { } func (p projEQDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30130,7 +28872,7 @@ func (p projEQDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30178,12 +28920,6 @@ type projEQInt16Int16ConstOp struct { } func (p projEQInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30270,7 +29006,7 @@ func (p projEQInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30340,12 +29076,6 @@ type projEQInt16Int32ConstOp struct { } func (p projEQInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30432,7 +29162,7 @@ func (p projEQInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30502,12 +29232,6 @@ type projEQInt16Int64ConstOp struct { } func (p projEQInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30594,7 +29318,7 @@ func (p projEQInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30664,12 +29388,6 @@ type projEQInt16Float64ConstOp struct { } func (p projEQInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30772,7 +29490,7 @@ func (p projEQInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30858,12 +29576,6 @@ type projEQInt16DecimalConstOp struct { } func (p projEQInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30901,9 +29613,9 @@ func (p projEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -30924,9 +29636,9 @@ func (p projEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -30940,7 +29652,7 @@ func (p projEQInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30951,9 +29663,9 @@ func (p projEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -30971,9 +29683,9 @@ func (p projEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -31000,12 +29712,6 @@ type projEQInt32Int16ConstOp struct { } func (p projEQInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31092,7 +29798,7 @@ func (p projEQInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31162,12 +29868,6 @@ type projEQInt32Int32ConstOp struct { } func (p projEQInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31254,7 +29954,7 @@ func (p projEQInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31324,12 +30024,6 @@ type projEQInt32Int64ConstOp struct { } func (p projEQInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31416,7 +30110,7 @@ func (p projEQInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31486,12 +30180,6 @@ type projEQInt32Float64ConstOp struct { } func (p projEQInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31594,7 +30282,7 @@ func (p projEQInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31680,12 +30368,6 @@ type projEQInt32DecimalConstOp struct { } func (p projEQInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31723,9 +30405,9 @@ func (p projEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -31746,9 +30428,9 @@ func (p projEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -31762,7 +30444,7 @@ func (p projEQInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31773,9 +30455,9 @@ func (p projEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -31793,9 +30475,9 @@ func (p projEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -31822,12 +30504,6 @@ type projEQInt64Int16ConstOp struct { } func (p projEQInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31914,7 +30590,7 @@ func (p projEQInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31984,12 +30660,6 @@ type projEQInt64Int32ConstOp struct { } func (p projEQInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32076,7 +30746,7 @@ func (p projEQInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32146,12 +30816,6 @@ type projEQInt64Int64ConstOp struct { } func (p projEQInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32238,7 +30902,7 @@ func (p projEQInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32308,12 +30972,6 @@ type projEQInt64Float64ConstOp struct { } func (p projEQInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32416,7 +31074,7 @@ func (p projEQInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32502,12 +31160,6 @@ type projEQInt64DecimalConstOp struct { } func (p projEQInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32545,9 +31197,9 @@ func (p projEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -32568,9 +31220,9 @@ func (p projEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -32584,7 +31236,7 @@ func (p projEQInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32595,9 +31247,9 @@ func (p projEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -32615,9 +31267,9 @@ func (p projEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -32644,12 +31296,6 @@ type projEQFloat64Int16ConstOp struct { } func (p projEQFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32752,7 +31398,7 @@ func (p projEQFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32838,12 +31484,6 @@ type projEQFloat64Int32ConstOp struct { } func (p projEQFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32946,7 +31586,7 @@ func (p projEQFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33032,12 +31672,6 @@ type projEQFloat64Int64ConstOp struct { } func (p projEQFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33140,7 +31774,7 @@ func (p projEQFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33226,12 +31860,6 @@ type projEQFloat64Float64ConstOp struct { } func (p projEQFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33334,7 +31962,7 @@ func (p projEQFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33420,12 +32048,6 @@ type projEQFloat64DecimalConstOp struct { } func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33463,11 +32085,11 @@ func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -33488,11 +32110,11 @@ func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -33506,7 +32128,7 @@ func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33517,11 +32139,11 @@ func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -33539,11 +32161,11 @@ func (p projEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult == 0 @@ -33570,12 +32192,6 @@ type projEQTimestampTimestampConstOp struct { } func (p projEQTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33654,7 +32270,7 @@ func (p projEQTimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33716,12 +32332,6 @@ type projEQIntervalIntervalConstOp struct { } func (p projEQIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33786,7 +32396,7 @@ func (p projEQIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33834,12 +32444,6 @@ type projEQJSONJSONConstOp struct { } func (p projEQJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33915,7 +32519,7 @@ func (p projEQJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33974,12 +32578,6 @@ type projEQDatumDatumConstOp struct { } func (p projEQDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34047,7 +32645,7 @@ func (p projEQDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34098,12 +32696,6 @@ type projNEBoolBoolConstOp struct { } func (p projNEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34184,7 +32776,7 @@ func (p projNEBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34248,12 +32840,6 @@ type projNEBytesBytesConstOp struct { } func (p projNEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34317,7 +32903,7 @@ func (p projNEBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34364,12 +32950,6 @@ type projNEDecimalInt16ConstOp struct { } func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34407,9 +32987,9 @@ func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34430,9 +33010,9 @@ func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34446,7 +33026,7 @@ func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34457,9 +33037,9 @@ func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34477,9 +33057,9 @@ func (p projNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34506,12 +33086,6 @@ type projNEDecimalInt32ConstOp struct { } func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34549,9 +33123,9 @@ func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34572,9 +33146,9 @@ func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34588,7 +33162,7 @@ func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34599,9 +33173,9 @@ func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34619,9 +33193,9 @@ func (p projNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34648,12 +33222,6 @@ type projNEDecimalInt64ConstOp struct { } func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34691,9 +33259,9 @@ func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34714,9 +33282,9 @@ func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34730,7 +33298,7 @@ func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34741,9 +33309,9 @@ func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34761,9 +33329,9 @@ func (p projNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34790,12 +33358,6 @@ type projNEDecimalFloat64ConstOp struct { } func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34833,11 +33395,11 @@ func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34858,11 +33420,11 @@ func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34876,7 +33438,7 @@ func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34887,11 +33449,11 @@ func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34909,11 +33471,11 @@ func (p projNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult != 0 @@ -34940,12 +33502,6 @@ type projNEDecimalDecimalConstOp struct { } func (p projNEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35010,7 +33566,7 @@ func (p projNEDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35058,12 +33614,6 @@ type projNEInt16Int16ConstOp struct { } func (p projNEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35150,7 +33700,7 @@ func (p projNEInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35220,12 +33770,6 @@ type projNEInt16Int32ConstOp struct { } func (p projNEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35312,7 +33856,7 @@ func (p projNEInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35382,12 +33926,6 @@ type projNEInt16Int64ConstOp struct { } func (p projNEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35474,7 +34012,7 @@ func (p projNEInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35544,12 +34082,6 @@ type projNEInt16Float64ConstOp struct { } func (p projNEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35652,7 +34184,7 @@ func (p projNEInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35738,12 +34270,6 @@ type projNEInt16DecimalConstOp struct { } func (p projNEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35781,9 +34307,9 @@ func (p projNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -35804,9 +34330,9 @@ func (p projNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -35820,7 +34346,7 @@ func (p projNEInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35831,9 +34357,9 @@ func (p projNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -35851,9 +34377,9 @@ func (p projNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -35880,12 +34406,6 @@ type projNEInt32Int16ConstOp struct { } func (p projNEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35972,7 +34492,7 @@ func (p projNEInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36042,12 +34562,6 @@ type projNEInt32Int32ConstOp struct { } func (p projNEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36134,7 +34648,7 @@ func (p projNEInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36204,12 +34718,6 @@ type projNEInt32Int64ConstOp struct { } func (p projNEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36296,7 +34804,7 @@ func (p projNEInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36366,12 +34874,6 @@ type projNEInt32Float64ConstOp struct { } func (p projNEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36474,7 +34976,7 @@ func (p projNEInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36560,12 +35062,6 @@ type projNEInt32DecimalConstOp struct { } func (p projNEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36603,9 +35099,9 @@ func (p projNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -36626,9 +35122,9 @@ func (p projNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -36642,7 +35138,7 @@ func (p projNEInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36653,9 +35149,9 @@ func (p projNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -36673,9 +35169,9 @@ func (p projNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -36702,12 +35198,6 @@ type projNEInt64Int16ConstOp struct { } func (p projNEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36794,7 +35284,7 @@ func (p projNEInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36864,12 +35354,6 @@ type projNEInt64Int32ConstOp struct { } func (p projNEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36956,7 +35440,7 @@ func (p projNEInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37026,12 +35510,6 @@ type projNEInt64Int64ConstOp struct { } func (p projNEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37118,7 +35596,7 @@ func (p projNEInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37188,12 +35666,6 @@ type projNEInt64Float64ConstOp struct { } func (p projNEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37296,7 +35768,7 @@ func (p projNEInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37382,12 +35854,6 @@ type projNEInt64DecimalConstOp struct { } func (p projNEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37425,9 +35891,9 @@ func (p projNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -37448,9 +35914,9 @@ func (p projNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -37464,7 +35930,7 @@ func (p projNEInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37475,9 +35941,9 @@ func (p projNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -37495,9 +35961,9 @@ func (p projNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -37524,12 +35990,6 @@ type projNEFloat64Int16ConstOp struct { } func (p projNEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37632,7 +36092,7 @@ func (p projNEFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37718,12 +36178,6 @@ type projNEFloat64Int32ConstOp struct { } func (p projNEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37826,7 +36280,7 @@ func (p projNEFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37912,12 +36366,6 @@ type projNEFloat64Int64ConstOp struct { } func (p projNEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38020,7 +36468,7 @@ func (p projNEFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38106,12 +36554,6 @@ type projNEFloat64Float64ConstOp struct { } func (p projNEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38214,7 +36656,7 @@ func (p projNEFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38300,12 +36742,6 @@ type projNEFloat64DecimalConstOp struct { } func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38343,11 +36779,11 @@ func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -38368,11 +36804,11 @@ func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -38386,7 +36822,7 @@ func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38397,11 +36833,11 @@ func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -38419,11 +36855,11 @@ func (p projNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult != 0 @@ -38450,12 +36886,6 @@ type projNETimestampTimestampConstOp struct { } func (p projNETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38534,7 +36964,7 @@ func (p projNETimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38596,12 +37026,6 @@ type projNEIntervalIntervalConstOp struct { } func (p projNEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38666,7 +37090,7 @@ func (p projNEIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38714,12 +37138,6 @@ type projNEJSONJSONConstOp struct { } func (p projNEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38795,7 +37213,7 @@ func (p projNEJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38854,12 +37272,6 @@ type projNEDatumDatumConstOp struct { } func (p projNEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38927,7 +37339,7 @@ func (p projNEDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38978,12 +37390,6 @@ type projLTBoolBoolConstOp struct { } func (p projLTBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39064,7 +37470,7 @@ func (p projLTBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39128,12 +37534,6 @@ type projLTBytesBytesConstOp struct { } func (p projLTBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39197,7 +37597,7 @@ func (p projLTBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39244,12 +37644,6 @@ type projLTDecimalInt16ConstOp struct { } func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39287,9 +37681,9 @@ func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39310,9 +37704,9 @@ func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39326,7 +37720,7 @@ func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39337,9 +37731,9 @@ func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39357,9 +37751,9 @@ func (p projLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39386,12 +37780,6 @@ type projLTDecimalInt32ConstOp struct { } func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39429,9 +37817,9 @@ func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39452,9 +37840,9 @@ func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39468,7 +37856,7 @@ func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39479,9 +37867,9 @@ func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39499,9 +37887,9 @@ func (p projLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39528,12 +37916,6 @@ type projLTDecimalInt64ConstOp struct { } func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39571,9 +37953,9 @@ func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39594,9 +37976,9 @@ func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39610,7 +37992,7 @@ func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39621,9 +38003,9 @@ func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39641,9 +38023,9 @@ func (p projLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39670,12 +38052,6 @@ type projLTDecimalFloat64ConstOp struct { } func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39713,11 +38089,11 @@ func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39738,11 +38114,11 @@ func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39756,7 +38132,7 @@ func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39767,11 +38143,11 @@ func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39789,11 +38165,11 @@ func (p projLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult < 0 @@ -39820,12 +38196,6 @@ type projLTDecimalDecimalConstOp struct { } func (p projLTDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39890,7 +38260,7 @@ func (p projLTDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39938,12 +38308,6 @@ type projLTInt16Int16ConstOp struct { } func (p projLTInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40030,7 +38394,7 @@ func (p projLTInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40100,12 +38464,6 @@ type projLTInt16Int32ConstOp struct { } func (p projLTInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40192,7 +38550,7 @@ func (p projLTInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40262,12 +38620,6 @@ type projLTInt16Int64ConstOp struct { } func (p projLTInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40354,7 +38706,7 @@ func (p projLTInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40424,12 +38776,6 @@ type projLTInt16Float64ConstOp struct { } func (p projLTInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40532,7 +38878,7 @@ func (p projLTInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40618,12 +38964,6 @@ type projLTInt16DecimalConstOp struct { } func (p projLTInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40661,9 +39001,9 @@ func (p projLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -40684,9 +39024,9 @@ func (p projLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -40700,7 +39040,7 @@ func (p projLTInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40711,9 +39051,9 @@ func (p projLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -40731,9 +39071,9 @@ func (p projLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -40760,12 +39100,6 @@ type projLTInt32Int16ConstOp struct { } func (p projLTInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40852,7 +39186,7 @@ func (p projLTInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40922,12 +39256,6 @@ type projLTInt32Int32ConstOp struct { } func (p projLTInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41014,7 +39342,7 @@ func (p projLTInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41084,12 +39412,6 @@ type projLTInt32Int64ConstOp struct { } func (p projLTInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41176,7 +39498,7 @@ func (p projLTInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41246,12 +39568,6 @@ type projLTInt32Float64ConstOp struct { } func (p projLTInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41354,7 +39670,7 @@ func (p projLTInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41440,12 +39756,6 @@ type projLTInt32DecimalConstOp struct { } func (p projLTInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41483,9 +39793,9 @@ func (p projLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -41506,9 +39816,9 @@ func (p projLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -41522,7 +39832,7 @@ func (p projLTInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41533,9 +39843,9 @@ func (p projLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -41553,9 +39863,9 @@ func (p projLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -41582,12 +39892,6 @@ type projLTInt64Int16ConstOp struct { } func (p projLTInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41674,7 +39978,7 @@ func (p projLTInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41744,12 +40048,6 @@ type projLTInt64Int32ConstOp struct { } func (p projLTInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41836,7 +40134,7 @@ func (p projLTInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41906,12 +40204,6 @@ type projLTInt64Int64ConstOp struct { } func (p projLTInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41998,7 +40290,7 @@ func (p projLTInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42068,12 +40360,6 @@ type projLTInt64Float64ConstOp struct { } func (p projLTInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42176,7 +40462,7 @@ func (p projLTInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42262,12 +40548,6 @@ type projLTInt64DecimalConstOp struct { } func (p projLTInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42305,9 +40585,9 @@ func (p projLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -42328,9 +40608,9 @@ func (p projLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -42344,7 +40624,7 @@ func (p projLTInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42355,9 +40635,9 @@ func (p projLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -42375,9 +40655,9 @@ func (p projLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -42404,12 +40684,6 @@ type projLTFloat64Int16ConstOp struct { } func (p projLTFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42512,7 +40786,7 @@ func (p projLTFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42598,12 +40872,6 @@ type projLTFloat64Int32ConstOp struct { } func (p projLTFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42706,7 +40974,7 @@ func (p projLTFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42792,12 +41060,6 @@ type projLTFloat64Int64ConstOp struct { } func (p projLTFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42900,7 +41162,7 @@ func (p projLTFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42986,12 +41248,6 @@ type projLTFloat64Float64ConstOp struct { } func (p projLTFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43094,7 +41350,7 @@ func (p projLTFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43180,12 +41436,6 @@ type projLTFloat64DecimalConstOp struct { } func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43223,11 +41473,11 @@ func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -43248,11 +41498,11 @@ func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -43266,7 +41516,7 @@ func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43277,11 +41527,11 @@ func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -43299,11 +41549,11 @@ func (p projLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult < 0 @@ -43330,12 +41580,6 @@ type projLTTimestampTimestampConstOp struct { } func (p projLTTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43414,7 +41658,7 @@ func (p projLTTimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43476,12 +41720,6 @@ type projLTIntervalIntervalConstOp struct { } func (p projLTIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43546,7 +41784,7 @@ func (p projLTIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43594,12 +41832,6 @@ type projLTJSONJSONConstOp struct { } func (p projLTJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43675,7 +41907,7 @@ func (p projLTJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43734,12 +41966,6 @@ type projLTDatumDatumConstOp struct { } func (p projLTDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43807,7 +42033,7 @@ func (p projLTDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43858,12 +42084,6 @@ type projLEBoolBoolConstOp struct { } func (p projLEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43944,7 +42164,7 @@ func (p projLEBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44008,12 +42228,6 @@ type projLEBytesBytesConstOp struct { } func (p projLEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44077,7 +42291,7 @@ func (p projLEBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44124,12 +42338,6 @@ type projLEDecimalInt16ConstOp struct { } func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44167,9 +42375,9 @@ func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44190,9 +42398,9 @@ func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44206,7 +42414,7 @@ func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44217,9 +42425,9 @@ func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44237,9 +42445,9 @@ func (p projLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44266,12 +42474,6 @@ type projLEDecimalInt32ConstOp struct { } func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44309,9 +42511,9 @@ func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44332,9 +42534,9 @@ func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44348,7 +42550,7 @@ func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44359,9 +42561,9 @@ func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44379,9 +42581,9 @@ func (p projLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44408,12 +42610,6 @@ type projLEDecimalInt64ConstOp struct { } func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44451,9 +42647,9 @@ func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44474,9 +42670,9 @@ func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44490,7 +42686,7 @@ func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44501,9 +42697,9 @@ func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44521,9 +42717,9 @@ func (p projLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44550,12 +42746,6 @@ type projLEDecimalFloat64ConstOp struct { } func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44593,11 +42783,11 @@ func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44618,11 +42808,11 @@ func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44636,7 +42826,7 @@ func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44647,11 +42837,11 @@ func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44669,11 +42859,11 @@ func (p projLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -44700,12 +42890,6 @@ type projLEDecimalDecimalConstOp struct { } func (p projLEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44770,7 +42954,7 @@ func (p projLEDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44818,12 +43002,6 @@ type projLEInt16Int16ConstOp struct { } func (p projLEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44910,7 +43088,7 @@ func (p projLEInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44980,12 +43158,6 @@ type projLEInt16Int32ConstOp struct { } func (p projLEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45072,7 +43244,7 @@ func (p projLEInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45142,12 +43314,6 @@ type projLEInt16Int64ConstOp struct { } func (p projLEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45234,7 +43400,7 @@ func (p projLEInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45304,12 +43470,6 @@ type projLEInt16Float64ConstOp struct { } func (p projLEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45412,7 +43572,7 @@ func (p projLEInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45498,12 +43658,6 @@ type projLEInt16DecimalConstOp struct { } func (p projLEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45541,9 +43695,9 @@ func (p projLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -45564,9 +43718,9 @@ func (p projLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -45580,7 +43734,7 @@ func (p projLEInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45591,9 +43745,9 @@ func (p projLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -45611,9 +43765,9 @@ func (p projLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -45640,12 +43794,6 @@ type projLEInt32Int16ConstOp struct { } func (p projLEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45732,7 +43880,7 @@ func (p projLEInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45802,12 +43950,6 @@ type projLEInt32Int32ConstOp struct { } func (p projLEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45894,7 +44036,7 @@ func (p projLEInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45964,12 +44106,6 @@ type projLEInt32Int64ConstOp struct { } func (p projLEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46056,7 +44192,7 @@ func (p projLEInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46126,12 +44262,6 @@ type projLEInt32Float64ConstOp struct { } func (p projLEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46234,7 +44364,7 @@ func (p projLEInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46320,12 +44450,6 @@ type projLEInt32DecimalConstOp struct { } func (p projLEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46363,9 +44487,9 @@ func (p projLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -46386,9 +44510,9 @@ func (p projLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -46402,7 +44526,7 @@ func (p projLEInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46413,9 +44537,9 @@ func (p projLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -46433,9 +44557,9 @@ func (p projLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -46462,12 +44586,6 @@ type projLEInt64Int16ConstOp struct { } func (p projLEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46554,7 +44672,7 @@ func (p projLEInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46624,12 +44742,6 @@ type projLEInt64Int32ConstOp struct { } func (p projLEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46716,7 +44828,7 @@ func (p projLEInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46786,12 +44898,6 @@ type projLEInt64Int64ConstOp struct { } func (p projLEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46878,7 +44984,7 @@ func (p projLEInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46948,12 +45054,6 @@ type projLEInt64Float64ConstOp struct { } func (p projLEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47056,7 +45156,7 @@ func (p projLEInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47142,12 +45242,6 @@ type projLEInt64DecimalConstOp struct { } func (p projLEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47185,9 +45279,9 @@ func (p projLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -47208,9 +45302,9 @@ func (p projLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -47224,7 +45318,7 @@ func (p projLEInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47235,9 +45329,9 @@ func (p projLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -47255,9 +45349,9 @@ func (p projLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -47284,12 +45378,6 @@ type projLEFloat64Int16ConstOp struct { } func (p projLEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47392,7 +45480,7 @@ func (p projLEFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47478,12 +45566,6 @@ type projLEFloat64Int32ConstOp struct { } func (p projLEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47586,7 +45668,7 @@ func (p projLEFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47672,12 +45754,6 @@ type projLEFloat64Int64ConstOp struct { } func (p projLEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47780,7 +45856,7 @@ func (p projLEFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47866,12 +45942,6 @@ type projLEFloat64Float64ConstOp struct { } func (p projLEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47974,7 +46044,7 @@ func (p projLEFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48060,12 +46130,6 @@ type projLEFloat64DecimalConstOp struct { } func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48103,11 +46167,11 @@ func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -48128,11 +46192,11 @@ func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -48146,7 +46210,7 @@ func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48157,11 +46221,11 @@ func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -48179,11 +46243,11 @@ func (p projLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult <= 0 @@ -48210,12 +46274,6 @@ type projLETimestampTimestampConstOp struct { } func (p projLETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48294,7 +46352,7 @@ func (p projLETimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48356,12 +46414,6 @@ type projLEIntervalIntervalConstOp struct { } func (p projLEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48426,7 +46478,7 @@ func (p projLEIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48474,12 +46526,6 @@ type projLEJSONJSONConstOp struct { } func (p projLEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48555,7 +46601,7 @@ func (p projLEJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48614,12 +46660,6 @@ type projLEDatumDatumConstOp struct { } func (p projLEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48687,7 +46727,7 @@ func (p projLEDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48738,12 +46778,6 @@ type projGTBoolBoolConstOp struct { } func (p projGTBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48824,7 +46858,7 @@ func (p projGTBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48888,12 +46922,6 @@ type projGTBytesBytesConstOp struct { } func (p projGTBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48957,7 +46985,7 @@ func (p projGTBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49004,12 +47032,6 @@ type projGTDecimalInt16ConstOp struct { } func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49047,9 +47069,9 @@ func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49070,9 +47092,9 @@ func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49086,7 +47108,7 @@ func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49097,9 +47119,9 @@ func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49117,9 +47139,9 @@ func (p projGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49146,12 +47168,6 @@ type projGTDecimalInt32ConstOp struct { } func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49189,9 +47205,9 @@ func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49212,9 +47228,9 @@ func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49228,7 +47244,7 @@ func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49239,9 +47255,9 @@ func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49259,9 +47275,9 @@ func (p projGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49288,12 +47304,6 @@ type projGTDecimalInt64ConstOp struct { } func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49331,9 +47341,9 @@ func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49354,9 +47364,9 @@ func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49370,7 +47380,7 @@ func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49381,9 +47391,9 @@ func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49401,9 +47411,9 @@ func (p projGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49430,12 +47440,6 @@ type projGTDecimalFloat64ConstOp struct { } func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49473,11 +47477,11 @@ func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49498,11 +47502,11 @@ func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49516,7 +47520,7 @@ func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49527,11 +47531,11 @@ func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49549,11 +47553,11 @@ func (p projGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult > 0 @@ -49580,12 +47584,6 @@ type projGTDecimalDecimalConstOp struct { } func (p projGTDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49650,7 +47648,7 @@ func (p projGTDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49698,12 +47696,6 @@ type projGTInt16Int16ConstOp struct { } func (p projGTInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49790,7 +47782,7 @@ func (p projGTInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49860,12 +47852,6 @@ type projGTInt16Int32ConstOp struct { } func (p projGTInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49952,7 +47938,7 @@ func (p projGTInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50022,12 +48008,6 @@ type projGTInt16Int64ConstOp struct { } func (p projGTInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50114,7 +48094,7 @@ func (p projGTInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50184,12 +48164,6 @@ type projGTInt16Float64ConstOp struct { } func (p projGTInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50292,7 +48266,7 @@ func (p projGTInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50378,12 +48352,6 @@ type projGTInt16DecimalConstOp struct { } func (p projGTInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50421,9 +48389,9 @@ func (p projGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -50444,9 +48412,9 @@ func (p projGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -50460,7 +48428,7 @@ func (p projGTInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50471,9 +48439,9 @@ func (p projGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -50491,9 +48459,9 @@ func (p projGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -50520,12 +48488,6 @@ type projGTInt32Int16ConstOp struct { } func (p projGTInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50612,7 +48574,7 @@ func (p projGTInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50682,12 +48644,6 @@ type projGTInt32Int32ConstOp struct { } func (p projGTInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50774,7 +48730,7 @@ func (p projGTInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50844,12 +48800,6 @@ type projGTInt32Int64ConstOp struct { } func (p projGTInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50936,7 +48886,7 @@ func (p projGTInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51006,12 +48956,6 @@ type projGTInt32Float64ConstOp struct { } func (p projGTInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51114,7 +49058,7 @@ func (p projGTInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51200,12 +49144,6 @@ type projGTInt32DecimalConstOp struct { } func (p projGTInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51243,9 +49181,9 @@ func (p projGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -51266,9 +49204,9 @@ func (p projGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -51282,7 +49220,7 @@ func (p projGTInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51293,9 +49231,9 @@ func (p projGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -51313,9 +49251,9 @@ func (p projGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -51342,12 +49280,6 @@ type projGTInt64Int16ConstOp struct { } func (p projGTInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51434,7 +49366,7 @@ func (p projGTInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51504,12 +49436,6 @@ type projGTInt64Int32ConstOp struct { } func (p projGTInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51596,7 +49522,7 @@ func (p projGTInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51666,12 +49592,6 @@ type projGTInt64Int64ConstOp struct { } func (p projGTInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51758,7 +49678,7 @@ func (p projGTInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51828,12 +49748,6 @@ type projGTInt64Float64ConstOp struct { } func (p projGTInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51936,7 +49850,7 @@ func (p projGTInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52022,12 +49936,6 @@ type projGTInt64DecimalConstOp struct { } func (p projGTInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52065,9 +49973,9 @@ func (p projGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -52088,9 +49996,9 @@ func (p projGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -52104,7 +50012,7 @@ func (p projGTInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52115,9 +50023,9 @@ func (p projGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -52135,9 +50043,9 @@ func (p projGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -52164,12 +50072,6 @@ type projGTFloat64Int16ConstOp struct { } func (p projGTFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52272,7 +50174,7 @@ func (p projGTFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52358,12 +50260,6 @@ type projGTFloat64Int32ConstOp struct { } func (p projGTFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52466,7 +50362,7 @@ func (p projGTFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52552,12 +50448,6 @@ type projGTFloat64Int64ConstOp struct { } func (p projGTFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52660,7 +50550,7 @@ func (p projGTFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52746,12 +50636,6 @@ type projGTFloat64Float64ConstOp struct { } func (p projGTFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52854,7 +50738,7 @@ func (p projGTFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52940,12 +50824,6 @@ type projGTFloat64DecimalConstOp struct { } func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52983,11 +50861,11 @@ func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -53008,11 +50886,11 @@ func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -53026,7 +50904,7 @@ func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53037,11 +50915,11 @@ func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -53059,11 +50937,11 @@ func (p projGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult > 0 @@ -53090,12 +50968,6 @@ type projGTTimestampTimestampConstOp struct { } func (p projGTTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53174,7 +51046,7 @@ func (p projGTTimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53236,12 +51108,6 @@ type projGTIntervalIntervalConstOp struct { } func (p projGTIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53306,7 +51172,7 @@ func (p projGTIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53354,12 +51220,6 @@ type projGTJSONJSONConstOp struct { } func (p projGTJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53435,7 +51295,7 @@ func (p projGTJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53494,12 +51354,6 @@ type projGTDatumDatumConstOp struct { } func (p projGTDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53567,7 +51421,7 @@ func (p projGTDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53618,12 +51472,6 @@ type projGEBoolBoolConstOp struct { } func (p projGEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53704,7 +51552,7 @@ func (p projGEBoolBoolConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53768,12 +51616,6 @@ type projGEBytesBytesConstOp struct { } func (p projGEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53837,7 +51679,7 @@ func (p projGEBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53884,12 +51726,6 @@ type projGEDecimalInt16ConstOp struct { } func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53927,9 +51763,9 @@ func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -53950,9 +51786,9 @@ func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -53966,7 +51802,7 @@ func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
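The hunks around this point all apply the same mechanical change to the generated comparison bodies: the scratch decimal previously borrowed from the shared `_overloadHelper` is replaced by a function-local `apd.Decimal` annotated `//gcassert:noescape`, and the input nulls bitmap is now dereferenced (`*colNulls`) when it is unioned into the output nulls. Below is a minimal, self-contained sketch of the scratch-decimal pattern, not the generated operator itself; the function name `cmpInt64WithDecimalConst` and the use of `apd/v3` with `Decimal.Cmp` (in place of `tree.CompareDecimals`) are illustrative assumptions.

```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

// cmpInt64WithDecimalConst mirrors the generated comparison body: the int64
// argument is promoted to a decimal in a stack-allocated scratch value and
// compared against the decimal constant. The name is illustrative only.
func cmpInt64WithDecimalConst(arg int64, constArg *apd.Decimal) int {
	var tmpDec apd.Decimal //gcassert:noescape
	tmpDec.SetInt64(arg)
	return tmpDec.Cmp(constArg)
}

func main() {
	constArg := apd.New(42, 0) // the decimal constant 42
	fmt.Println(cmpInt64WithDecimalConst(100, constArg) > 0) // true: 100 > 42
}
```

Because the scratch value no longer escapes, each call keeps it on the stack, and the operator structs can drop the helper field entirely.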
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53977,9 +51813,9 @@ func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -53997,9 +51833,9 @@ func (p projGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54026,12 +51862,6 @@ type projGEDecimalInt32ConstOp struct { } func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54069,9 +51899,9 @@ func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54092,9 +51922,9 @@ func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54108,7 +51938,7 @@ func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54119,9 +51949,9 @@ func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54139,9 +51969,9 @@ func (p projGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54168,12 +51998,6 @@ type projGEDecimalInt64ConstOp struct { } func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54211,9 +52035,9 @@ func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54234,9 +52058,9 @@ func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54250,7 +52074,7 @@ func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54261,9 +52085,9 @@ func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54281,9 +52105,9 @@ func (p projGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54310,12 +52134,6 @@ type projGEDecimalFloat64ConstOp struct { } func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54353,11 +52171,11 @@ func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54378,11 +52196,11 @@ func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54396,7 +52214,7 @@ func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54407,11 +52225,11 @@ func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54429,11 +52247,11 @@ func (p projGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -54460,12 +52278,6 @@ type projGEDecimalDecimalConstOp struct { } func (p projGEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54530,7 +52342,7 @@ func (p projGEDecimalDecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54578,12 +52390,6 @@ type projGEInt16Int16ConstOp struct { } func (p projGEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54670,7 +52476,7 @@ func (p projGEInt16Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54740,12 +52546,6 @@ type projGEInt16Int32ConstOp struct { } func (p projGEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54832,7 +52632,7 @@ func (p projGEInt16Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54902,12 +52702,6 @@ type projGEInt16Int64ConstOp struct { } func (p projGEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54994,7 +52788,7 @@ func (p projGEInt16Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55064,12 +52858,6 @@ type projGEInt16Float64ConstOp struct { } func (p projGEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55172,7 +52960,7 @@ func (p projGEInt16Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55258,12 +53046,6 @@ type projGEInt16DecimalConstOp struct { } func (p projGEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55301,9 +53083,9 @@ func (p projGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -55324,9 +53106,9 @@ func (p projGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -55340,7 +53122,7 @@ func (p projGEInt16DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55351,9 +53133,9 @@ func (p projGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -55371,9 +53153,9 @@ func (p projGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -55400,12 +53182,6 @@ type projGEInt32Int16ConstOp struct { } func (p projGEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55492,7 +53268,7 @@ func (p projGEInt32Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55562,12 +53338,6 @@ type projGEInt32Int32ConstOp struct { } func (p projGEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55654,7 +53424,7 @@ func (p projGEInt32Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55724,12 +53494,6 @@ type projGEInt32Int64ConstOp struct { } func (p projGEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55816,7 +53580,7 @@ func (p projGEInt32Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55886,12 +53650,6 @@ type projGEInt32Float64ConstOp struct { } func (p projGEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55994,7 +53752,7 @@ func (p projGEInt32Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56080,12 +53838,6 @@ type projGEInt32DecimalConstOp struct { } func (p projGEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56123,9 +53875,9 @@ func (p projGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56146,9 +53898,9 @@ func (p projGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56162,7 +53914,7 @@ func (p projGEInt32DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56173,9 +53925,9 @@ func (p projGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56193,9 +53945,9 @@ func (p projGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56222,12 +53974,6 @@ type projGEInt64Int16ConstOp struct { } func (p projGEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56314,7 +54060,7 @@ func (p projGEInt64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56384,12 +54130,6 @@ type projGEInt64Int32ConstOp struct { } func (p projGEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56476,7 +54216,7 @@ func (p projGEInt64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56546,12 +54286,6 @@ type projGEInt64Int64ConstOp struct { } func (p projGEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56638,7 +54372,7 @@ func (p projGEInt64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56708,12 +54442,6 @@ type projGEInt64Float64ConstOp struct { } func (p projGEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56816,7 +54544,7 @@ func (p projGEInt64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56902,12 +54630,6 @@ type projGEInt64DecimalConstOp struct { } func (p projGEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56945,9 +54667,9 @@ func (p projGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56968,9 +54690,9 @@ func (p projGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -56984,7 +54706,7 @@ func (p projGEInt64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56995,9 +54717,9 @@ func (p projGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57015,9 +54737,9 @@ func (p projGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57044,12 +54766,6 @@ type projGEFloat64Int16ConstOp struct { } func (p projGEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57152,7 +54868,7 @@ func (p projGEFloat64Int16ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57238,12 +54954,6 @@ type projGEFloat64Int32ConstOp struct { } func (p projGEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57346,7 +55056,7 @@ func (p projGEFloat64Int32ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57432,12 +55142,6 @@ type projGEFloat64Int64ConstOp struct { } func (p projGEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57540,7 +55244,7 @@ func (p projGEFloat64Int64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57626,12 +55330,6 @@ type projGEFloat64Float64ConstOp struct { } func (p projGEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57734,7 +55432,7 @@ func (p projGEFloat64Float64ConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57820,12 +55518,6 @@ type projGEFloat64DecimalConstOp struct { } func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57863,11 +55555,11 @@ func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57888,11 +55580,11 @@ func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57906,7 +55598,7 @@ func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57917,11 +55609,11 @@ func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57939,11 +55631,11 @@ func (p projGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } projCol[i] = cmpResult >= 0 @@ -57970,12 +55662,6 @@ type projGETimestampTimestampConstOp struct { } func (p projGETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58054,7 +55740,7 @@ func (p projGETimestampTimestampConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58116,12 +55802,6 @@ type projGEIntervalIntervalConstOp struct { } func (p projGEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58186,7 +55866,7 @@ func (p projGEIntervalIntervalConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58234,12 +55914,6 @@ type projGEJSONJSONConstOp struct { } func (p projGEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58315,7 +55989,7 @@ func (p projGEJSONJSONConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58374,12 +56048,6 @@ type projGEDatumDatumConstOp struct { } func (p projGEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58447,7 +56115,7 @@ func (p projGEDatumDatumConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58514,7 +56182,6 @@ func GetProjectionRConstOperator( allocator: allocator, colIdx: colIdx, outputIdx: outputIdx, - overloadHelper: execgen.OverloadHelper{BinFn: binFn, EvalCtx: evalCtx}, } c := colconv.GetDatumToPhysicalFn(constType)(constArg) leftType, rightType := inputTypes[colIdx], constType @@ -58530,21 +56197,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt16Int16ConstOp{ + op := &projBitandInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitandInt16Int32ConstOp{ + op := &projBitandInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitandInt16Int64ConstOp{ + op := &projBitandInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case 32: @@ -58552,21 +56222,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt32Int16ConstOp{ + op := &projBitandInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitandInt32Int32ConstOp{ + op := &projBitandInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitandInt32Int64ConstOp{ + op := &projBitandInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case -1: @@ -58575,21 +56248,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt64Int16ConstOp{ + op := &projBitandInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitandInt64Int32ConstOp{ + op := &projBitandInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitandInt64Int64ConstOp{ + op := &projBitandInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -58602,10 +56278,12 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projBitandDatumDatumConstOp{ + op := &projBitandDatumDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -58619,21 +56297,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt16Int16ConstOp{ + op := &projBitorInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitorInt16Int32ConstOp{ + op := &projBitorInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitorInt16Int64ConstOp{ + op := &projBitorInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case 32: @@ -58641,21 +56322,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt32Int16ConstOp{ + op := 
&projBitorInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitorInt32Int32ConstOp{ + op := &projBitorInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitorInt32Int64ConstOp{ + op := &projBitorInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case -1: @@ -58664,21 +56348,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt64Int16ConstOp{ + op := &projBitorInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitorInt64Int32ConstOp{ + op := &projBitorInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitorInt64Int64ConstOp{ + op := &projBitorInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -58691,10 +56378,12 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projBitorDatumDatumConstOp{ + op := &projBitorDatumDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -58708,21 +56397,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt16Int16ConstOp{ + op := &projBitxorInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitxorInt16Int32ConstOp{ + op := &projBitxorInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt16Int64ConstOp{ + op := &projBitxorInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case 32: @@ -58730,21 +56422,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt32Int16ConstOp{ + op := &projBitxorInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitxorInt32Int32ConstOp{ + op := &projBitxorInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt32Int64ConstOp{ + op := &projBitxorInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case -1: @@ -58753,21 +56448,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt64Int16ConstOp{ + op := &projBitxorInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projBitxorInt64Int32ConstOp{ + op := &projBitxorInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projBitxorInt64Int64ConstOp{ + op := &projBitxorInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -58780,10 +56478,12 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projBitxorDatumDatumConstOp{ + op := 
&projBitxorDatumDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -58798,30 +56498,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDecimalInt16ConstOp{ + op := &projPlusDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPlusDecimalInt32ConstOp{ + op := &projPlusDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPlusDecimalInt64ConstOp{ + op := &projPlusDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusDecimalDecimalConstOp{ + op := &projPlusDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -58832,39 +56536,45 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt16Int16ConstOp{ + op := &projPlusInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPlusInt16Int32ConstOp{ + op := &projPlusInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPlusInt16Int64ConstOp{ + op := &projPlusInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16DecimalConstOp{ + op := &projPlusInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16DatumConstOp{ + op := &projPlusInt16DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -58872,39 +56582,45 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt32Int16ConstOp{ + op := &projPlusInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPlusInt32Int32ConstOp{ + op := &projPlusInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPlusInt32Int64ConstOp{ + op := &projPlusInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32DecimalConstOp{ + op := &projPlusInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32DatumConstOp{ + op := &projPlusInt32DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -58913,39 +56629,45 @@ func 
GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt64Int16ConstOp{ + op := &projPlusInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPlusInt64Int32ConstOp{ + op := &projPlusInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPlusInt64Int64ConstOp{ + op := &projPlusInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64DecimalConstOp{ + op := &projPlusInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64DatumConstOp{ + op := &projPlusInt64DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -58958,10 +56680,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projPlusFloat64Float64ConstOp{ + op := &projPlusFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -58974,10 +56697,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projPlusTimestampIntervalConstOp{ + op := &projPlusTimestampIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -58990,28 +56714,32 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projPlusIntervalTimestampConstOp{ + op := &projPlusIntervalTimestampConstOp{ projConstOpBase: projConstOpBase, constArg: c.(time.Time), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalIntervalConstOp{ + op := &projPlusIntervalIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalDatumConstOp{ + op := &projPlusIntervalDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -59024,29 +56752,37 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projPlusDatumIntervalConstOp{ + op := &projPlusDatumIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDatumInt16ConstOp{ + op := &projPlusDatumInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projPlusDatumInt32ConstOp{ + op := &projPlusDatumInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return 
op, nil case -1: default: - return &projPlusDatumInt64ConstOp{ + op := &projPlusDatumInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -59061,30 +56797,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDecimalInt16ConstOp{ + op := &projMinusDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusDecimalInt32ConstOp{ + op := &projMinusDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusDecimalInt64ConstOp{ + op := &projMinusDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusDecimalDecimalConstOp{ + op := &projMinusDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59095,39 +56835,45 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt16Int16ConstOp{ + op := &projMinusInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusInt16Int32ConstOp{ + op := &projMinusInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusInt16Int64ConstOp{ + op := &projMinusInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16DecimalConstOp{ + op := &projMinusInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16DatumConstOp{ + op := &projMinusInt16DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -59135,39 +56881,45 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt32Int16ConstOp{ + op := &projMinusInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusInt32Int32ConstOp{ + op := &projMinusInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusInt32Int64ConstOp{ + op := &projMinusInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32DecimalConstOp{ + op := &projMinusInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32DatumConstOp{ + op := &projMinusInt32DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: 
binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -59176,39 +56928,45 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt64Int16ConstOp{ + op := &projMinusInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusInt64Int32ConstOp{ + op := &projMinusInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusInt64Int64ConstOp{ + op := &projMinusInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64DecimalConstOp{ + op := &projMinusInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64DatumConstOp{ + op := &projMinusInt64DatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -59221,10 +56979,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMinusFloat64Float64ConstOp{ + op := &projMinusFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -59237,19 +56996,21 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMinusTimestampTimestampConstOp{ + op := &projMinusTimestampTimestampConstOp{ projConstOpBase: projConstOpBase, constArg: c.(time.Time), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusTimestampIntervalConstOp{ + op := &projMinusTimestampIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -59262,19 +57023,22 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMinusIntervalIntervalConstOp{ + op := &projMinusIntervalIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusIntervalDatumConstOp{ + op := &projMinusIntervalDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -59287,29 +57051,33 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMinusJSONBytesConstOp{ + op := &projMinusJSONBytesConstOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusJSONInt16ConstOp{ + op := &projMinusJSONInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMinusJSONInt32ConstOp{ + op := &projMinusJSONInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMinusJSONInt64ConstOp{ + op := &projMinusJSONInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return 
op, nil } } } @@ -59322,47 +57090,59 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMinusDatumDatumConstOp{ + op := &projMinusDatumDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumIntervalConstOp{ + op := &projMinusDatumIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.BytesFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumBytesConstOp{ + op := &projMinusDatumBytesConstOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDatumInt16ConstOp{ + op := &projMinusDatumInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projMinusDatumInt32ConstOp{ + op := &projMinusDatumInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projMinusDatumInt64ConstOp{ + op := &projMinusDatumInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -59377,39 +57157,44 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultDecimalInt16ConstOp{ + op := &projMultDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultDecimalInt32ConstOp{ + op := &projMultDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultDecimalInt64ConstOp{ + op := &projMultDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalDecimalConstOp{ + op := &projMultDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalIntervalConstOp{ + op := &projMultDecimalIntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -59420,39 +57205,44 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt16Int16ConstOp{ + op := &projMultInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultInt16Int32ConstOp{ + op := &projMultInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultInt16Int64ConstOp{ + op := &projMultInt16Int64ConstOp{ 
projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16DecimalConstOp{ + op := &projMultInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16IntervalConstOp{ + op := &projMultInt16IntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } case 32: @@ -59460,39 +57250,44 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt32Int16ConstOp{ + op := &projMultInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultInt32Int32ConstOp{ + op := &projMultInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultInt32Int64ConstOp{ + op := &projMultInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32DecimalConstOp{ + op := &projMultInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32IntervalConstOp{ + op := &projMultInt32IntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } case -1: @@ -59501,39 +57296,44 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt64Int16ConstOp{ + op := &projMultInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultInt64Int32ConstOp{ + op := &projMultInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultInt64Int64ConstOp{ + op := &projMultInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64DecimalConstOp{ + op := &projMultInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64IntervalConstOp{ + op := &projMultInt64IntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -59546,19 +57346,21 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projMultFloat64Float64ConstOp{ + op := &projMultFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultFloat64IntervalConstOp{ + op := &projMultFloat64IntervalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(duration.Duration), - }, nil + } + return op, nil } } } @@ -59570,39 +57372,44 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultIntervalInt16ConstOp{ + op := 
&projMultIntervalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projMultIntervalInt32ConstOp{ + op := &projMultIntervalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projMultIntervalInt64ConstOp{ + op := &projMultIntervalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalFloat64ConstOp{ + op := &projMultIntervalFloat64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalDecimalConstOp{ + op := &projMultIntervalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59617,30 +57424,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivDecimalInt16ConstOp{ + op := &projDivDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projDivDecimalInt32ConstOp{ + op := &projDivDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projDivDecimalInt64ConstOp{ + op := &projDivDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivDecimalDecimalConstOp{ + op := &projDivDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59651,30 +57462,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt16Int16ConstOp{ + op := &projDivInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projDivInt16Int32ConstOp{ + op := &projDivInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projDivInt16Int64ConstOp{ + op := &projDivInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt16DecimalConstOp{ + op := &projDivInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case 32: @@ -59682,30 +57497,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt32Int16ConstOp{ + op := &projDivInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projDivInt32Int32ConstOp{ + op := &projDivInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projDivInt32Int64ConstOp{ + op := &projDivInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt32DecimalConstOp{ + op := &projDivInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, 
nil } } case -1: @@ -59714,30 +57533,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt64Int16ConstOp{ + op := &projDivInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projDivInt64Int32ConstOp{ + op := &projDivInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projDivInt64Int64ConstOp{ + op := &projDivInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt64DecimalConstOp{ + op := &projDivInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59750,10 +57573,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projDivFloat64Float64ConstOp{ + op := &projDivFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -59766,19 +57590,21 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projDivIntervalInt64ConstOp{ + op := &projDivIntervalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projDivIntervalFloat64ConstOp{ + op := &projDivIntervalFloat64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -59793,30 +57619,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivDecimalInt16ConstOp{ + op := &projFloorDivDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projFloorDivDecimalInt32ConstOp{ + op := &projFloorDivDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projFloorDivDecimalInt64ConstOp{ + op := &projFloorDivDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivDecimalDecimalConstOp{ + op := &projFloorDivDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59827,30 +57657,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt16Int16ConstOp{ + op := &projFloorDivInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projFloorDivInt16Int32ConstOp{ + op := &projFloorDivInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt16Int64ConstOp{ + op := &projFloorDivInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt16DecimalConstOp{ + op := &projFloorDivInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case 32: @@ -59858,30 +57692,34 @@ func GetProjectionRConstOperator( case 
types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt32Int16ConstOp{ + op := &projFloorDivInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projFloorDivInt32Int32ConstOp{ + op := &projFloorDivInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt32Int64ConstOp{ + op := &projFloorDivInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt32DecimalConstOp{ + op := &projFloorDivInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case -1: @@ -59890,30 +57728,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt64Int16ConstOp{ + op := &projFloorDivInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projFloorDivInt64Int32ConstOp{ + op := &projFloorDivInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projFloorDivInt64Int64ConstOp{ + op := &projFloorDivInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt64DecimalConstOp{ + op := &projFloorDivInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59926,10 +57768,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projFloorDivFloat64Float64ConstOp{ + op := &projFloorDivFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -59944,30 +57787,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModDecimalInt16ConstOp{ + op := &projModDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projModDecimalInt32ConstOp{ + op := &projModDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projModDecimalInt64ConstOp{ + op := &projModDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModDecimalDecimalConstOp{ + op := &projModDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -59978,30 +57825,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt16Int16ConstOp{ + op := &projModInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projModInt16Int32ConstOp{ + op := &projModInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projModInt16Int64ConstOp{ + op := &projModInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch 
rightType.Width() { case -1: default: - return &projModInt16DecimalConstOp{ + op := &projModInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case 32: @@ -60009,30 +57860,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt32Int16ConstOp{ + op := &projModInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projModInt32Int32ConstOp{ + op := &projModInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projModInt32Int64ConstOp{ + op := &projModInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt32DecimalConstOp{ + op := &projModInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case -1: @@ -60041,30 +57896,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt64Int16ConstOp{ + op := &projModInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projModInt64Int32ConstOp{ + op := &projModInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projModInt64Int64ConstOp{ + op := &projModInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt64DecimalConstOp{ + op := &projModInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -60077,10 +57936,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projModFloat64Float64ConstOp{ + op := &projModFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -60095,30 +57955,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowDecimalInt16ConstOp{ + op := &projPowDecimalInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPowDecimalInt32ConstOp{ + op := &projPowDecimalInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPowDecimalInt64ConstOp{ + op := &projPowDecimalInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowDecimalDecimalConstOp{ + op := &projPowDecimalDecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -60129,30 +57993,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt16Int16ConstOp{ + op := &projPowInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPowInt16Int32ConstOp{ + op := &projPowInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - 
return &projPowInt16Int64ConstOp{ + op := &projPowInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt16DecimalConstOp{ + op := &projPowInt16DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case 32: @@ -60160,30 +58028,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt32Int16ConstOp{ + op := &projPowInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPowInt32Int32ConstOp{ + op := &projPowInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPowInt32Int64ConstOp{ + op := &projPowInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt32DecimalConstOp{ + op := &projPowInt32DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } case -1: @@ -60192,30 +58064,34 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt64Int16ConstOp{ + op := &projPowInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projPowInt64Int32ConstOp{ + op := &projPowInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projPowInt64Int64ConstOp{ + op := &projPowInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt64DecimalConstOp{ + op := &projPowInt64DecimalConstOp{ projConstOpBase: projConstOpBase, constArg: c.(apd.Decimal), - }, nil + } + return op, nil } } } @@ -60228,10 +58104,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projPowFloat64Float64ConstOp{ + op := &projPowFloat64Float64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(float64), - }, nil + } + return op, nil } } } @@ -60247,10 +58124,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projConcatBytesBytesConstOp{ + op := &projConcatBytesBytesConstOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + return op, nil } } } @@ -60263,10 +58141,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projConcatJSONJSONConstOp{ + op := &projConcatJSONJSONConstOp{ projConstOpBase: projConstOpBase, constArg: c.(json.JSON), - }, nil + } + return op, nil } } } @@ -60279,10 +58158,12 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projConcatDatumDatumConstOp{ + op := &projConcatDatumDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -60296,21 +58177,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt16Int16ConstOp{ + op := &projLShiftInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: 
c.(int16), - }, nil + } + return op, nil case 32: - return &projLShiftInt16Int32ConstOp{ + op := &projLShiftInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt16Int64ConstOp{ + op := &projLShiftInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case 32: @@ -60318,21 +58202,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt32Int16ConstOp{ + op := &projLShiftInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projLShiftInt32Int32ConstOp{ + op := &projLShiftInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt32Int64ConstOp{ + op := &projLShiftInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case -1: @@ -60341,21 +58228,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt64Int16ConstOp{ + op := &projLShiftInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projLShiftInt64Int32ConstOp{ + op := &projLShiftInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projLShiftInt64Int64ConstOp{ + op := &projLShiftInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -60367,21 +58257,27 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftDatumInt16ConstOp{ + op := &projLShiftDatumInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projLShiftDatumInt32ConstOp{ + op := &projLShiftDatumInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projLShiftDatumInt64ConstOp{ + op := &projLShiftDatumInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -60395,21 +58291,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt16Int16ConstOp{ + op := &projRShiftInt16Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projRShiftInt16Int32ConstOp{ + op := &projRShiftInt16Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt16Int64ConstOp{ + op := &projRShiftInt16Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case 32: @@ -60417,21 +58316,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt32Int16ConstOp{ + op := &projRShiftInt32Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return 
&projRShiftInt32Int32ConstOp{ + op := &projRShiftInt32Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt32Int64ConstOp{ + op := &projRShiftInt32Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } case -1: @@ -60440,21 +58342,24 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt64Int16ConstOp{ + op := &projRShiftInt64Int16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projRShiftInt64Int32ConstOp{ + op := &projRShiftInt64Int32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projRShiftInt64Int64ConstOp{ + op := &projRShiftInt64Int64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -60466,21 +58371,27 @@ func GetProjectionRConstOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftDatumInt16ConstOp{ + op := &projRShiftDatumInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projRShiftDatumInt32ConstOp{ + op := &projRShiftDatumInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projRShiftDatumInt64ConstOp{ + op := &projRShiftDatumInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -60496,29 +58407,33 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValJSONBytesConstOp{ + op := &projJSONFetchValJSONBytesConstOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchValJSONInt16ConstOp{ + op := &projJSONFetchValJSONInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projJSONFetchValJSONInt32ConstOp{ + op := &projJSONFetchValJSONInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projJSONFetchValJSONInt64ConstOp{ + op := &projJSONFetchValJSONInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -60534,29 +58449,33 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextJSONBytesConstOp{ + op := &projJSONFetchTextJSONBytesConstOp{ projConstOpBase: projConstOpBase, constArg: c.([]byte), - }, nil + } + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchTextJSONInt16ConstOp{ + op := &projJSONFetchTextJSONInt16ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int16), - }, nil + } + return op, nil case 32: - return &projJSONFetchTextJSONInt32ConstOp{ + op := &projJSONFetchTextJSONInt32ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int32), - }, nil + } + return op, nil case -1: default: - return &projJSONFetchTextJSONInt64ConstOp{ + op := 
&projJSONFetchTextJSONInt64ConstOp{ projConstOpBase: projConstOpBase, constArg: c.(int64), - }, nil + } + return op, nil } } } @@ -60572,10 +58491,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValPathJSONDatumConstOp{ + op := &projJSONFetchValPathJSONDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + return op, nil } } } @@ -60591,10 +58511,11 @@ func GetProjectionRConstOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextPathJSONDatumConstOp{ + op := &projJSONFetchTextPathJSONDatumConstOp{ projConstOpBase: projConstOpBase, constArg: constArg, - }, nil + } + return op, nil } } } diff --git a/pkg/sql/colexec/colexecproj/proj_like_ops.eg.go b/pkg/sql/colexec/colexecproj/proj_like_ops.eg.go index 9ce89444fa74..e0ae8b248a42 100644 --- a/pkg/sql/colexec/colexecproj/proj_like_ops.eg.go +++ b/pkg/sql/colexec/colexecproj/proj_like_ops.eg.go @@ -23,12 +23,6 @@ type projPrefixBytesBytesConstOp struct { } func (p projPrefixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -80,7 +74,7 @@ func (p projPrefixBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -115,12 +109,6 @@ type projSuffixBytesBytesConstOp struct { } func (p projSuffixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -172,7 +160,7 @@ func (p projSuffixBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -207,12 +195,6 @@ type projContainsBytesBytesConstOp struct { } func (p projContainsBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -264,7 +246,7 @@ func (p projContainsBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -299,12 +281,6 @@ type projRegexpBytesBytesConstOp struct { } func (p projRegexpBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -356,7 +332,7 @@ func (p projRegexpBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -391,12 +367,6 @@ type projNotPrefixBytesBytesConstOp struct { } func (p projNotPrefixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -448,7 +418,7 @@ func (p projNotPrefixBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -483,12 +453,6 @@ type projNotSuffixBytesBytesConstOp struct { } func (p projNotSuffixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -540,7 +504,7 @@ func (p projNotSuffixBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -575,12 +539,6 @@ type projNotContainsBytesBytesConstOp struct { } func (p projNotContainsBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -632,7 +590,7 @@ func (p projNotContainsBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -667,12 +625,6 @@ type projNotRegexpBytesBytesConstOp struct { } func (p projNotRegexpBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -724,7 +676,7 @@ func (p projNotRegexpBytesBytesConstOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(colNulls)) + projVec.SetNulls(_outNulls.Or(*colNulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] diff --git a/pkg/sql/colexec/colexecproj/proj_non_const_ops.eg.go b/pkg/sql/colexec/colexecproj/proj_non_const_ops.eg.go index 6c82d25b1143..467d6f360bcc 100644 --- a/pkg/sql/colexec/colexecproj/proj_non_const_ops.eg.go +++ b/pkg/sql/colexec/colexecproj/proj_non_const_ops.eg.go @@ -15,6 +15,7 @@ import ( "time" "unsafe" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -40,6 +41,7 @@ var ( _ = coldataext.CompareDatum _ sqltelemetry.EnumTelemetryType _ telemetry.Counter + _ apd.Context ) // projConstOpBase contains all of the fields for projections with a constant, @@ -49,20 +51,18 @@ var ( // around the problem we specify it here. type projConstOpBase struct { colexecop.OneInputHelper - allocator *colmem.Allocator - colIdx int - outputIdx int - overloadHelper execgen.OverloadHelper + allocator *colmem.Allocator + colIdx int + outputIdx int } // projOpBase contains all of the fields for non-constant projections. type projOpBase struct { colexecop.OneInputHelper - allocator *colmem.Allocator - col1Idx int - col2Idx int - outputIdx int - overloadHelper execgen.OverloadHelper + allocator *colmem.Allocator + col1Idx int + col2Idx int + outputIdx int } type projBitandInt16Int16Op struct { @@ -70,12 +70,6 @@ type projBitandInt16Int16Op struct { } func (p projBitandInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -137,7 +131,7 @@ func (p projBitandInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -180,12 +174,6 @@ type projBitandInt16Int32Op struct { } func (p projBitandInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -247,7 +235,7 @@ func (p projBitandInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -290,12 +278,6 @@ type projBitandInt16Int64Op struct { } func (p projBitandInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -357,7 +339,7 @@ func (p projBitandInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -400,12 +382,6 @@ type projBitandInt32Int16Op struct { } func (p projBitandInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -467,7 +443,7 @@ func (p projBitandInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -510,12 +486,6 @@ type projBitandInt32Int32Op struct { } func (p projBitandInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -577,7 +547,7 @@ func (p projBitandInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -620,12 +590,6 @@ type projBitandInt32Int64Op struct { } func (p projBitandInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -687,7 +651,7 @@ func (p projBitandInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -730,12 +694,6 @@ type projBitandInt64Int16Op struct { } func (p projBitandInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -797,7 +755,7 @@ func (p projBitandInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -840,12 +798,6 @@ type projBitandInt64Int32Op struct { } func (p projBitandInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -907,7 +859,7 @@ func (p projBitandInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -950,12 +902,6 @@ type projBitandInt64Int64Op struct { } func (p projBitandInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1017,7 +963,7 @@ func (p projBitandInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1057,15 +1003,14 @@ func (p projBitandInt64Int64Op) Next() coldata.Batch { type projBitandDatumDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projBitandDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1139,7 +1084,7 @@ func (p projBitandDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1194,12 +1139,6 @@ type projBitorInt16Int16Op struct { } func (p projBitorInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1261,7 +1200,7 @@ func (p projBitorInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
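In the hunks above, the non-datum operators lose both the `overloadHelper execgen.OverloadHelper` field and the `_overloadHelper := p.overloadHelper; _ = _overloadHelper` preamble, while the datum variants such as `projBitandDatumDatumOp` instead embed `execgen.BinaryOverloadHelper` and copy the promoted field into a local at the top of `Next`. A minimal sketch of that embedding pattern, with toy types standing in for the real execgen and colexec ones:
```
package main

import "fmt"

// binaryOverloadHelper stands in for execgen.BinaryOverloadHelper: state
// that only the datum-backed operators actually need.
type binaryOverloadHelper struct {
	opName string
}

// projOpBase stands in for the shared base struct, now without a helper
// field for operators that never use one.
type projOpBase struct {
	col1Idx, col2Idx, outputIdx int
}

// projDatumDatumOp embeds the helper, so the field is promoted and
// reachable without declaring it on every operator.
type projDatumDatumOp struct {
	projOpBase
	binaryOverloadHelper
}

func (p projDatumDatumOp) next() {
	// Copy the promoted helper into a local, mirroring
	// `_overloadHelper := p.BinaryOverloadHelper` in the generated code.
	overloadHelper := p.binaryOverloadHelper
	fmt.Println("datum op uses helper:", overloadHelper.opName, "output column:", p.outputIdx)
}

func main() {
	op := projDatumDatumOp{
		projOpBase:           projOpBase{col1Idx: 0, col2Idx: 1, outputIdx: 2},
		binaryOverloadHelper: binaryOverloadHelper{opName: "&"},
	}
	op.next()
}
```
Operators that never touch the helper no longer pay for the field or the per-call copy, which is the point of splitting the embedding out of the shared base struct.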
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1304,12 +1243,6 @@ type projBitorInt16Int32Op struct { } func (p projBitorInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1371,7 +1304,7 @@ func (p projBitorInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1414,12 +1347,6 @@ type projBitorInt16Int64Op struct { } func (p projBitorInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1481,7 +1408,7 @@ func (p projBitorInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1524,12 +1451,6 @@ type projBitorInt32Int16Op struct { } func (p projBitorInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1591,7 +1512,7 @@ func (p projBitorInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1634,12 +1555,6 @@ type projBitorInt32Int32Op struct { } func (p projBitorInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1701,7 +1616,7 @@ func (p projBitorInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1744,12 +1659,6 @@ type projBitorInt32Int64Op struct { } func (p projBitorInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1811,7 +1720,7 @@ func (p projBitorInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1854,12 +1763,6 @@ type projBitorInt64Int16Op struct { } func (p projBitorInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -1921,7 +1824,7 @@ func (p projBitorInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -1964,12 +1867,6 @@ type projBitorInt64Int32Op struct { } func (p projBitorInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2031,7 +1928,7 @@ func (p projBitorInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2074,12 +1971,6 @@ type projBitorInt64Int64Op struct { } func (p projBitorInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2141,7 +2032,7 @@ func (p projBitorInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2181,15 +2072,14 @@ func (p projBitorInt64Int64Op) Next() coldata.Batch { type projBitorDatumDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projBitorDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2263,7 +2153,7 @@ func (p projBitorDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2318,12 +2208,6 @@ type projBitxorInt16Int16Op struct { } func (p projBitxorInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2385,7 +2269,7 @@ func (p projBitxorInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2428,12 +2312,6 @@ type projBitxorInt16Int32Op struct { } func (p projBitxorInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2495,7 +2373,7 @@ func (p projBitxorInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2538,12 +2416,6 @@ type projBitxorInt16Int64Op struct { } func (p projBitxorInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2605,7 +2477,7 @@ func (p projBitxorInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2648,12 +2520,6 @@ type projBitxorInt32Int16Op struct { } func (p projBitxorInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2715,7 +2581,7 @@ func (p projBitxorInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2758,12 +2624,6 @@ type projBitxorInt32Int32Op struct { } func (p projBitxorInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2825,7 +2685,7 @@ func (p projBitxorInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2868,12 +2728,6 @@ type projBitxorInt32Int64Op struct { } func (p projBitxorInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -2935,7 +2789,7 @@ func (p projBitxorInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -2978,12 +2832,6 @@ type projBitxorInt64Int16Op struct { } func (p projBitxorInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3045,7 +2893,7 @@ func (p projBitxorInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3088,12 +2936,6 @@ type projBitxorInt64Int32Op struct { } func (p projBitxorInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3155,7 +2997,7 @@ func (p projBitxorInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3198,12 +3040,6 @@ type projBitxorInt64Int64Op struct { } func (p projBitxorInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3265,7 +3101,7 @@ func (p projBitxorInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3305,15 +3141,14 @@ func (p projBitxorInt64Int64Op) Next() coldata.Batch { type projBitxorDatumDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projBitxorDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3387,7 +3222,7 @@ func (p projBitxorDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3442,12 +3277,6 @@ type projPlusDecimalInt16Op struct { } func (p projPlusDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3484,9 +3313,9 @@ func (p projPlusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3508,9 +3337,9 @@ func (p projPlusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3523,7 +3352,7 @@ func (p projPlusDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3533,9 +3362,9 @@ func (p projPlusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3553,9 +3382,9 @@ func (p projPlusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3580,12 +3409,6 @@ type projPlusDecimalInt32Op struct { } func (p projPlusDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3622,9 +3445,9 @@ func (p projPlusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3646,9 +3469,9 @@ func (p projPlusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3661,7 +3484,7 @@ func (p projPlusDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
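The decimal/integer hunks above replace the shared `_overloadHelper.TmpDec1` scratch with a per-iteration `var tmpDec apd.Decimal //gcassert:noescape`, relying on the gcassert linter to check that the local does not escape to the heap; the `github.com/cockroachdb/apd/v3` import added earlier in this file supports it. A small self-contained sketch of the same pattern, using `apd.BaseContext` where the generated code uses `tree.ExactCtx`:
```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

func main() {
	// A wide-precision decimal context; the generated code uses
	// tree.ExactCtx, which plays the same role.
	ctx := apd.BaseContext.WithPrecision(20)

	arg1 := apd.New(1234, -2) // 12.34
	arg2 := int16(5)

	var result apd.Decimal

	// Mirrors the generated pattern: a stack-local scratch decimal instead
	// of a shared helper field, set from the integer argument and then
	// added to the decimal argument.
	var tmpDec apd.Decimal //gcassert:noescape
	tmpDec.SetInt64(int64(arg2))
	if _, err := ctx.Add(&result, arg1, &tmpDec); err != nil {
		panic(err)
	}
	fmt.Println(result.String()) // 17.34
}
```
Keeping the scratch on the stack avoids threading mutable state through the operator struct and lets the escape check document that the temporary stays allocation-free.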
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3671,9 +3494,9 @@ func (p projPlusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3691,9 +3514,9 @@ func (p projPlusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3718,12 +3541,6 @@ type projPlusDecimalInt64Op struct { } func (p projPlusDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3760,9 +3577,9 @@ func (p projPlusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3784,9 +3601,9 @@ func (p projPlusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3799,7 +3616,7 @@ func (p projPlusDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3809,9 +3626,9 @@ func (p projPlusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3829,9 +3646,9 @@ func (p projPlusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Add(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -3856,12 +3673,6 @@ type projPlusDecimalDecimalOp struct { } func (p projPlusDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -3935,7 +3746,7 @@ func (p projPlusDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -3990,12 +3801,6 @@ type projPlusInt16Int16Op struct { } func (p projPlusInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4069,7 +3874,7 @@ func (p projPlusInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4124,12 +3929,6 @@ type projPlusInt16Int32Op struct { } func (p projPlusInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4203,7 +4002,7 @@ func (p projPlusInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4258,12 +4057,6 @@ type projPlusInt16Int64Op struct { } func (p projPlusInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4337,7 +4130,7 @@ func (p projPlusInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4392,12 +4185,6 @@ type projPlusInt16DecimalOp struct { } func (p projPlusInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4434,9 +4221,9 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -4459,9 +4246,9 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -4475,7 +4262,7 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4485,9 +4272,9 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -4506,9 +4293,9 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -4531,15 +4318,14 @@ func (p projPlusInt16DecimalOp) Next() coldata.Batch { type projPlusInt16DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusInt16DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4622,7 +4408,7 @@ func (p projPlusInt16DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4686,12 +4472,6 @@ type projPlusInt32Int16Op struct { } func (p projPlusInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4765,7 +4545,7 @@ func (p projPlusInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4820,12 +4600,6 @@ type projPlusInt32Int32Op struct { } func (p projPlusInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -4899,7 +4673,7 @@ func (p projPlusInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -4954,12 +4728,6 @@ type projPlusInt32Int64Op struct { } func (p projPlusInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5033,7 +4801,7 @@ func (p projPlusInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5088,12 +4856,6 @@ type projPlusInt32DecimalOp struct { } func (p projPlusInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5130,9 +4892,9 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5155,9 +4917,9 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5171,7 +4933,7 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5181,9 +4943,9 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5202,9 +4964,9 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5227,15 +4989,14 @@ func (p projPlusInt32DecimalOp) Next() coldata.Batch { type projPlusInt32DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusInt32DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5318,7 +5079,7 @@ func (p projPlusInt32DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5382,12 +5143,6 @@ type projPlusInt64Int16Op struct { } func (p projPlusInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5461,7 +5216,7 @@ func (p projPlusInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5516,12 +5271,6 @@ type projPlusInt64Int32Op struct { } func (p projPlusInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5595,7 +5344,7 @@ func (p projPlusInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5650,12 +5399,6 @@ type projPlusInt64Int64Op struct { } func (p projPlusInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5729,7 +5472,7 @@ func (p projPlusInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5784,12 +5527,6 @@ type projPlusInt64DecimalOp struct { } func (p projPlusInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -5826,9 +5563,9 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5851,9 +5588,9 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5867,7 +5604,7 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -5877,9 +5614,9 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5898,9 +5635,9 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Add(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Add(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -5923,15 +5660,14 @@ func (p projPlusInt64DecimalOp) Next() coldata.Batch { type projPlusInt64DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusInt64DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6014,7 +5750,7 @@ func (p projPlusInt64DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6078,12 +5814,6 @@ type projPlusFloat64Float64Op struct { } func (p projPlusFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6151,7 +5881,7 @@ func (p projPlusFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6200,12 +5930,6 @@ type projPlusTimestampIntervalOp struct { } func (p projPlusTimestampIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6273,7 +5997,7 @@ func (p projPlusTimestampIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6322,12 +6046,6 @@ type projPlusIntervalTimestampOp struct { } func (p projPlusIntervalTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6395,7 +6113,7 @@ func (p projPlusIntervalTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6444,12 +6162,6 @@ type projPlusIntervalIntervalOp struct { } func (p projPlusIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6507,7 +6219,7 @@ func (p projPlusIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6543,15 +6255,14 @@ func (p projPlusIntervalIntervalOp) Next() coldata.Batch { type projPlusIntervalDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusIntervalDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6634,7 +6345,7 @@ func (p projPlusIntervalDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6695,15 +6406,14 @@ func (p projPlusIntervalDatumOp) Next() coldata.Batch { type projPlusDatumIntervalOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusDatumIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6786,7 +6496,7 @@ func (p projPlusDatumIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6847,15 +6557,14 @@ func (p projPlusDatumIntervalOp) Next() coldata.Batch { type projPlusDatumInt16Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusDatumInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -6938,7 +6647,7 @@ func (p projPlusDatumInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -6999,15 +6708,14 @@ func (p projPlusDatumInt16Op) Next() coldata.Batch { type projPlusDatumInt32Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusDatumInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7090,7 +6798,7 @@ func (p projPlusDatumInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7151,15 +6859,14 @@ func (p projPlusDatumInt32Op) Next() coldata.Batch { type projPlusDatumInt64Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projPlusDatumInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7242,7 +6949,7 @@ func (p projPlusDatumInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7306,12 +7013,6 @@ type projMinusDecimalInt16Op struct { } func (p projMinusDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7348,9 +7049,9 @@ func (p projMinusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7372,9 +7073,9 @@ func (p projMinusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7387,7 +7088,7 @@ func (p projMinusDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7397,9 +7098,9 @@ func (p projMinusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7417,9 +7118,9 @@ func (p projMinusDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7444,12 +7145,6 @@ type projMinusDecimalInt32Op struct { } func (p projMinusDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7486,9 +7181,9 @@ func (p projMinusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7510,9 +7205,9 @@ func (p projMinusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7525,7 +7220,7 @@ func (p projMinusDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7535,9 +7230,9 @@ func (p projMinusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7555,9 +7250,9 @@ func (p projMinusDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7582,12 +7277,6 @@ type projMinusDecimalInt64Op struct { } func (p projMinusDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7624,9 +7313,9 @@ func (p projMinusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7648,9 +7337,9 @@ func (p projMinusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7663,7 +7352,7 @@ func (p projMinusDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7673,9 +7362,9 @@ func (p projMinusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7693,9 +7382,9 @@ func (p projMinusDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Sub(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -7720,12 +7409,6 @@ type projMinusDecimalDecimalOp struct { } func (p projMinusDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7799,7 +7482,7 @@ func (p projMinusDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7854,12 +7537,6 @@ type projMinusInt16Int16Op struct { } func (p projMinusInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -7933,7 +7610,7 @@ func (p projMinusInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -7988,12 +7665,6 @@ type projMinusInt16Int32Op struct { } func (p projMinusInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8067,7 +7738,7 @@ func (p projMinusInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8122,12 +7793,6 @@ type projMinusInt16Int64Op struct { } func (p projMinusInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8201,7 +7866,7 @@ func (p projMinusInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8256,12 +7921,6 @@ type projMinusInt16DecimalOp struct { } func (p projMinusInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8298,9 +7957,9 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -8323,9 +7982,9 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -8339,7 +7998,7 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8349,9 +8008,9 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -8370,9 +8029,9 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -8395,15 +8054,14 @@ func (p projMinusInt16DecimalOp) Next() coldata.Batch { type projMinusInt16DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusInt16DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8486,7 +8144,7 @@ func (p projMinusInt16DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8550,12 +8208,6 @@ type projMinusInt32Int16Op struct { } func (p projMinusInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8629,7 +8281,7 @@ func (p projMinusInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8684,12 +8336,6 @@ type projMinusInt32Int32Op struct { } func (p projMinusInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8763,7 +8409,7 @@ func (p projMinusInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8818,12 +8464,6 @@ type projMinusInt32Int64Op struct { } func (p projMinusInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8897,7 +8537,7 @@ func (p projMinusInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -8952,12 +8592,6 @@ type projMinusInt32DecimalOp struct { } func (p projMinusInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -8994,9 +8628,9 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9019,9 +8653,9 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9035,7 +8669,7 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9045,9 +8679,9 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9066,9 +8700,9 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9091,15 +8725,14 @@ func (p projMinusInt32DecimalOp) Next() coldata.Batch { type projMinusInt32DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusInt32DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9182,7 +8815,7 @@ func (p projMinusInt32DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9246,12 +8879,6 @@ type projMinusInt64Int16Op struct { } func (p projMinusInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9325,7 +8952,7 @@ func (p projMinusInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9380,12 +9007,6 @@ type projMinusInt64Int32Op struct { } func (p projMinusInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9459,7 +9080,7 @@ func (p projMinusInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9514,12 +9135,6 @@ type projMinusInt64Int64Op struct { } func (p projMinusInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9593,7 +9208,7 @@ func (p projMinusInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9648,12 +9263,6 @@ type projMinusInt64DecimalOp struct { } func (p projMinusInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9690,9 +9299,9 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9715,9 +9324,9 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9731,7 +9340,7 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9741,9 +9350,9 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9762,9 +9371,9 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Sub(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Sub(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -9787,15 +9396,14 @@ func (p projMinusInt64DecimalOp) Next() coldata.Batch { type projMinusInt64DatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusInt64DatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -9878,7 +9486,7 @@ func (p projMinusInt64DatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -9942,12 +9550,6 @@ type projMinusFloat64Float64Op struct { } func (p projMinusFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10015,7 +9617,7 @@ func (p projMinusFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10064,12 +9666,6 @@ type projMinusTimestampTimestampOp struct { } func (p projMinusTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10133,7 +9729,7 @@ func (p projMinusTimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10178,12 +9774,6 @@ type projMinusTimestampIntervalOp struct { } func (p projMinusTimestampIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10251,7 +9841,7 @@ func (p projMinusTimestampIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10300,12 +9890,6 @@ type projMinusIntervalIntervalOp struct { } func (p projMinusIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10363,7 +9947,7 @@ func (p projMinusIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10399,15 +9983,14 @@ func (p projMinusIntervalIntervalOp) Next() coldata.Batch { type projMinusIntervalDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusIntervalDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10490,7 +10073,7 @@ func (p projMinusIntervalDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10554,12 +10137,6 @@ type projMinusJSONBytesOp struct { } func (p projMinusJSONBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10633,7 +10210,7 @@ func (p projMinusJSONBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10688,12 +10265,6 @@ type projMinusJSONInt16Op struct { } func (p projMinusJSONInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10760,7 +10331,7 @@ func (p projMinusJSONInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10808,12 +10379,6 @@ type projMinusJSONInt32Op struct { } func (p projMinusJSONInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -10880,7 +10445,7 @@ func (p projMinusJSONInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -10928,12 +10493,6 @@ type projMinusJSONInt64Op struct { } func (p projMinusJSONInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11000,7 +10559,7 @@ func (p projMinusJSONInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11045,15 +10604,14 @@ func (p projMinusJSONInt64Op) Next() coldata.Batch { type projMinusDatumDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11127,7 +10685,7 @@ func (p projMinusDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11179,15 +10737,14 @@ func (p projMinusDatumDatumOp) Next() coldata.Batch { type projMinusDatumIntervalOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11270,7 +10827,7 @@ func (p projMinusDatumIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11331,15 +10888,14 @@ func (p projMinusDatumIntervalOp) Next() coldata.Batch { type projMinusDatumBytesOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11421,7 +10977,7 @@ func (p projMinusDatumBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11481,15 +11037,14 @@ func (p projMinusDatumBytesOp) Next() coldata.Batch { type projMinusDatumInt16Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11572,7 +11127,7 @@ func (p projMinusDatumInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11633,15 +11188,14 @@ func (p projMinusDatumInt16Op) Next() coldata.Batch { type projMinusDatumInt32Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11724,7 +11278,7 @@ func (p projMinusDatumInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11785,15 +11339,14 @@ func (p projMinusDatumInt32Op) Next() coldata.Batch { type projMinusDatumInt64Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projMinusDatumInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11876,7 +11429,7 @@ func (p projMinusDatumInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -11940,12 +11493,6 @@ type projMultDecimalInt16Op struct { } func (p projMultDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -11982,9 +11529,9 @@ func (p projMultDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12006,9 +11553,9 @@ func (p projMultDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12021,7 +11568,7 @@ func (p projMultDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12031,9 +11578,9 @@ func (p projMultDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12051,9 +11598,9 @@ func (p projMultDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12078,12 +11625,6 @@ type projMultDecimalInt32Op struct { } func (p projMultDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12120,9 +11661,9 @@ func (p projMultDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12144,9 +11685,9 @@ func (p projMultDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12159,7 +11700,7 @@ func (p projMultDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12169,9 +11710,9 @@ func (p projMultDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12189,9 +11730,9 @@ func (p projMultDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12216,12 +11757,6 @@ type projMultDecimalInt64Op struct { } func (p projMultDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12258,9 +11793,9 @@ func (p projMultDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12282,9 +11817,9 @@ func (p projMultDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12297,7 +11832,7 @@ func (p projMultDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12307,9 +11842,9 @@ func (p projMultDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12327,9 +11862,9 @@ func (p projMultDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.ExactCtx.Mul(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -12354,12 +11889,6 @@ type projMultDecimalDecimalOp struct { } func (p projMultDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12433,7 +11962,7 @@ func (p projMultDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12488,12 +12017,6 @@ type projMultDecimalIntervalOp struct { } func (p projMultDecimalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12561,7 +12084,7 @@ func (p projMultDecimalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12610,12 +12133,6 @@ type projMultInt16Int16Op struct { } func (p projMultInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12705,7 +12222,7 @@ func (p projMultInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12776,12 +12293,6 @@ type projMultInt16Int32Op struct { } func (p projMultInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -12871,7 +12382,7 @@ func (p projMultInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -12942,12 +12453,6 @@ type projMultInt16Int64Op struct { } func (p projMultInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13037,7 +12542,7 @@ func (p projMultInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13108,12 +12613,6 @@ type projMultInt16DecimalOp struct { } func (p projMultInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13150,9 +12649,9 @@ func (p projMultInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13175,9 +12674,9 @@ func (p projMultInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13191,7 +12690,7 @@ func (p projMultInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13201,9 +12700,9 @@ func (p projMultInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13222,9 +12721,9 @@ func (p projMultInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13250,12 +12749,6 @@ type projMultInt16IntervalOp struct { } func (p projMultInt16IntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13313,7 +12806,7 @@ func (p projMultInt16IntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13352,12 +12845,6 @@ type projMultInt32Int16Op struct { } func (p projMultInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13447,7 +12934,7 @@ func (p projMultInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13518,12 +13005,6 @@ type projMultInt32Int32Op struct { } func (p projMultInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13613,7 +13094,7 @@ func (p projMultInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13684,12 +13165,6 @@ type projMultInt32Int64Op struct { } func (p projMultInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13779,7 +13254,7 @@ func (p projMultInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13850,12 +13325,6 @@ type projMultInt32DecimalOp struct { } func (p projMultInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -13892,9 +13361,9 @@ func (p projMultInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13917,9 +13386,9 @@ func (p projMultInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13933,7 +13402,7 @@ func (p projMultInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -13943,9 +13412,9 @@ func (p projMultInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13964,9 +13433,9 @@ func (p projMultInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -13992,12 +13461,6 @@ type projMultInt32IntervalOp struct { } func (p projMultInt32IntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14055,7 +13518,7 @@ func (p projMultInt32IntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14094,12 +13557,6 @@ type projMultInt64Int16Op struct { } func (p projMultInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14189,7 +13646,7 @@ func (p projMultInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14260,12 +13717,6 @@ type projMultInt64Int32Op struct { } func (p projMultInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14355,7 +13806,7 @@ func (p projMultInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14426,12 +13877,6 @@ type projMultInt64Int64Op struct { } func (p projMultInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14521,7 +13966,7 @@ func (p projMultInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14592,12 +14037,6 @@ type projMultInt64DecimalOp struct { } func (p projMultInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14634,9 +14073,9 @@ func (p projMultInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -14659,9 +14098,9 @@ func (p projMultInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -14675,7 +14114,7 @@ func (p projMultInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14685,9 +14124,9 @@ func (p projMultInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -14706,9 +14145,9 @@ func (p projMultInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.ExactCtx.Mul(&projCol[i], tmpDec, &arg2) + _, err := tree.ExactCtx.Mul(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -14734,12 +14173,6 @@ type projMultInt64IntervalOp struct { } func (p projMultInt64IntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14797,7 +14230,7 @@ func (p projMultInt64IntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14836,12 +14269,6 @@ type projMultFloat64Float64Op struct { } func (p projMultFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -14909,7 +14336,7 @@ func (p projMultFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -14958,12 +14385,6 @@ type projMultFloat64IntervalOp struct { } func (p projMultFloat64IntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15021,7 +14442,7 @@ func (p projMultFloat64IntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15060,12 +14481,6 @@ type projMultIntervalInt16Op struct { } func (p projMultIntervalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15123,7 +14538,7 @@ func (p projMultIntervalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15162,12 +14577,6 @@ type projMultIntervalInt32Op struct { } func (p projMultIntervalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15225,7 +14634,7 @@ func (p projMultIntervalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15264,12 +14673,6 @@ type projMultIntervalInt64Op struct { } func (p projMultIntervalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15327,7 +14730,7 @@ func (p projMultIntervalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15366,12 +14769,6 @@ type projMultIntervalFloat64Op struct { } func (p projMultIntervalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15429,7 +14826,7 @@ func (p projMultIntervalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15468,12 +14865,6 @@ type projMultIntervalDecimalOp struct { } func (p projMultIntervalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15541,7 +14932,7 @@ func (p projMultIntervalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15590,12 +14981,6 @@ type projDivDecimalInt16Op struct { } func (p projDivDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15636,9 +15021,9 @@ func (p projDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15664,9 +15049,9 @@ func (p projDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15679,7 +15064,7 @@ func (p projDivDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15693,9 +15078,9 @@ func (p projDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15717,9 +15102,9 @@ func (p projDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15744,12 +15129,6 @@ type projDivDecimalInt32Op struct { } func (p projDivDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15790,9 +15169,9 @@ func (p projDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15818,9 +15197,9 @@ func (p projDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15833,7 +15212,7 @@ func (p projDivDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -15847,9 +15226,9 @@ func (p projDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15871,9 +15250,9 @@ func (p projDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15898,12 +15277,6 @@ type projDivDecimalInt64Op struct { } func (p projDivDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -15944,9 +15317,9 @@ func (p projDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15972,9 +15345,9 @@ func (p projDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -15987,7 +15360,7 @@ func (p projDivDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16001,9 +15374,9 @@ func (p projDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16025,9 +15398,9 @@ func (p projDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16052,12 +15425,6 @@ type projDivDecimalDecimalOp struct { } func (p projDivDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16139,7 +15506,7 @@ func (p projDivDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16202,12 +15569,6 @@ type projDivInt16Int16Op struct { } func (p projDivInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16246,10 +15607,10 @@ func (p projDivInt16Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16273,10 +15634,10 @@ func (p projDivInt16Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16289,7 +15650,7 @@ func (p projDivInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16301,10 +15662,10 @@ func (p projDivInt16Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16324,10 +15685,10 @@ func (p projDivInt16Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16352,12 +15713,6 @@ type projDivInt16Int32Op struct { } func (p projDivInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16396,10 +15751,10 @@ func (p projDivInt16Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16423,10 +15778,10 @@ func (p projDivInt16Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16439,7 +15794,7 @@ func (p projDivInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16451,10 +15806,10 @@ func (p projDivInt16Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16474,10 +15829,10 @@ func (p projDivInt16Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16502,12 +15857,6 @@ type projDivInt16Int64Op struct { } func (p projDivInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16546,10 +15895,10 @@ func (p projDivInt16Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16573,10 +15922,10 @@ func (p projDivInt16Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16589,7 +15938,7 @@ func (p projDivInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16601,10 +15950,10 @@ func (p projDivInt16Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16624,10 +15973,10 @@ func (p projDivInt16Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16652,12 +16001,6 @@ type projDivInt16DecimalOp struct { } func (p projDivInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16698,9 +16041,9 @@ func (p projDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -16727,9 +16070,9 @@ func (p projDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -16743,7 +16086,7 @@ func (p projDivInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16757,9 +16100,9 @@ func (p projDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -16782,9 +16125,9 @@ func (p projDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -16810,12 +16153,6 @@ type projDivInt32Int16Op struct { } func (p projDivInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -16854,10 +16191,10 @@ func (p projDivInt32Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16881,10 +16218,10 @@ func (p projDivInt32Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16897,7 +16234,7 @@ func (p projDivInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -16909,10 +16246,10 @@ func (p projDivInt32Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16932,10 +16269,10 @@ func (p projDivInt32Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -16960,12 +16297,6 @@ type projDivInt32Int32Op struct { } func (p projDivInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17004,10 +16335,10 @@ func (p projDivInt32Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17031,10 +16362,10 @@ func (p projDivInt32Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17047,7 +16378,7 @@ func (p projDivInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17059,10 +16390,10 @@ func (p projDivInt32Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17082,10 +16413,10 @@ func (p projDivInt32Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17110,12 +16441,6 @@ type projDivInt32Int64Op struct { } func (p projDivInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17154,10 +16479,10 @@ func (p projDivInt32Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17181,10 +16506,10 @@ func (p projDivInt32Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17197,7 +16522,7 @@ func (p projDivInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17209,10 +16534,10 @@ func (p projDivInt32Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17232,10 +16557,10 @@ func (p projDivInt32Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17260,12 +16585,6 @@ type projDivInt32DecimalOp struct { } func (p projDivInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17306,9 +16625,9 @@ func (p projDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17335,9 +16654,9 @@ func (p projDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17351,7 +16670,7 @@ func (p projDivInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17365,9 +16684,9 @@ func (p projDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17390,9 +16709,9 @@ func (p projDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17418,12 +16737,6 @@ type projDivInt64Int16Op struct { } func (p projDivInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17462,10 +16775,10 @@ func (p projDivInt64Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17489,10 +16802,10 @@ func (p projDivInt64Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17505,7 +16818,7 @@ func (p projDivInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17517,10 +16830,10 @@ func (p projDivInt64Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17540,10 +16853,10 @@ func (p projDivInt64Int16Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17568,12 +16881,6 @@ type projDivInt64Int32Op struct { } func (p projDivInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17612,10 +16919,10 @@ func (p projDivInt64Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17639,10 +16946,10 @@ func (p projDivInt64Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17655,7 +16962,7 @@ func (p projDivInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17667,10 +16974,10 @@ func (p projDivInt64Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17690,10 +16997,10 @@ func (p projDivInt64Int32Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17718,12 +17025,6 @@ type projDivInt64Int64Op struct { } func (p projDivInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17762,10 +17063,10 @@ func (p projDivInt64Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17789,10 +17090,10 @@ func (p projDivInt64Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17805,7 +17106,7 @@ func (p projDivInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17817,10 +17118,10 @@ func (p projDivInt64Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17840,10 +17141,10 @@ func (p projDivInt64Int64Op) Next() coldata.Batch { if int64(arg2) == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Quo(&projCol[i], leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Quo(&projCol[i], &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -17868,12 +17169,6 @@ type projDivInt64DecimalOp struct { } func (p projDivInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -17914,9 +17209,9 @@ func (p projDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17943,9 +17238,9 @@ func (p projDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17959,7 +17254,7 @@ func (p projDivInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -17973,9 +17268,9 @@ func (p projDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -17998,9 +17293,9 @@ func (p projDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Quo(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Quo(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -18026,12 +17321,6 @@ type projDivFloat64Float64Op struct { } func (p projDivFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18107,7 +17396,7 @@ func (p projDivFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18164,12 +17453,6 @@ type projDivIntervalInt64Op struct { } func (p projDivIntervalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18235,7 +17518,7 @@ func (p projDivIntervalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18282,12 +17565,6 @@ type projDivIntervalFloat64Op struct { } func (p projDivIntervalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18353,7 +17630,7 @@ func (p projDivIntervalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18400,12 +17677,6 @@ type projFloorDivDecimalInt16Op struct { } func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18446,9 +17717,9 @@ func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18474,9 +17745,9 @@ func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18489,7 +17760,7 @@ func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18503,9 +17774,9 @@ func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18527,9 +17798,9 @@ func (p projFloorDivDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18554,12 +17825,6 @@ type projFloorDivDecimalInt32Op struct { } func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18600,9 +17865,9 @@ func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18628,9 +17893,9 @@ func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18643,7 +17908,7 @@ func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18657,9 +17922,9 @@ func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18681,9 +17946,9 @@ func (p projFloorDivDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18708,12 +17973,6 @@ type projFloorDivDecimalInt64Op struct { } func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18754,9 +18013,9 @@ func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18782,9 +18041,9 @@ func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18797,7 +18056,7 @@ func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -18811,9 +18070,9 @@ func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18835,9 +18094,9 @@ func (p projFloorDivDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -18862,12 +18121,6 @@ type projFloorDivDecimalDecimalOp struct { } func (p projFloorDivDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -18949,7 +18202,7 @@ func (p projFloorDivDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19012,12 +18265,6 @@ type projFloorDivInt16Int16Op struct { } func (p projFloorDivInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19089,7 +18336,7 @@ func (p projFloorDivInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19142,12 +18389,6 @@ type projFloorDivInt16Int32Op struct { } func (p projFloorDivInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19219,7 +18460,7 @@ func (p projFloorDivInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19272,12 +18513,6 @@ type projFloorDivInt16Int64Op struct { } func (p projFloorDivInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19349,7 +18584,7 @@ func (p projFloorDivInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19402,12 +18637,6 @@ type projFloorDivInt16DecimalOp struct { } func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19448,9 +18677,9 @@ func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -19477,9 +18706,9 @@ func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -19493,7 +18722,7 @@ func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19507,9 +18736,9 @@ func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -19532,9 +18761,9 @@ func (p projFloorDivInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -19560,12 +18789,6 @@ type projFloorDivInt32Int16Op struct { } func (p projFloorDivInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19637,7 +18860,7 @@ func (p projFloorDivInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19690,12 +18913,6 @@ type projFloorDivInt32Int32Op struct { } func (p projFloorDivInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19767,7 +18984,7 @@ func (p projFloorDivInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19820,12 +19037,6 @@ type projFloorDivInt32Int64Op struct { } func (p projFloorDivInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19897,7 +19108,7 @@ func (p projFloorDivInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -19950,12 +19161,6 @@ type projFloorDivInt32DecimalOp struct { } func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -19996,9 +19201,9 @@ func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20025,9 +19230,9 @@ func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20041,7 +19246,7 @@ func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20055,9 +19260,9 @@ func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20080,9 +19285,9 @@ func (p projFloorDivInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20108,12 +19313,6 @@ type projFloorDivInt64Int16Op struct { } func (p projFloorDivInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20185,7 +19384,7 @@ func (p projFloorDivInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20238,12 +19437,6 @@ type projFloorDivInt64Int32Op struct { } func (p projFloorDivInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20315,7 +19508,7 @@ func (p projFloorDivInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20368,12 +19561,6 @@ type projFloorDivInt64Int64Op struct { } func (p projFloorDivInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20445,7 +19632,7 @@ func (p projFloorDivInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20498,12 +19685,6 @@ type projFloorDivInt64DecimalOp struct { } func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20544,9 +19725,9 @@ func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20573,9 +19754,9 @@ func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20589,7 +19770,7 @@ func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20603,9 +19784,9 @@ func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20628,9 +19809,9 @@ func (p projFloorDivInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.QuoInteger(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -20656,12 +19837,6 @@ type projFloorDivFloat64Float64Op struct { } func (p projFloorDivFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20737,7 +19912,7 @@ func (p projFloorDivFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20794,12 +19969,6 @@ type projModDecimalInt16Op struct { } func (p projModDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20840,9 +20009,9 @@ func (p projModDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20868,9 +20037,9 @@ func (p projModDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20883,7 +20052,7 @@ func (p projModDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -20897,9 +20066,9 @@ func (p projModDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20921,9 +20090,9 @@ func (p projModDecimalInt16Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -20948,12 +20117,6 @@ type projModDecimalInt32Op struct { } func (p projModDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -20994,9 +20157,9 @@ func (p projModDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21022,9 +20185,9 @@ func (p projModDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21037,7 +20200,7 @@ func (p projModDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21051,9 +20214,9 @@ func (p projModDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21075,9 +20238,9 @@ func (p projModDecimalInt32Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21102,12 +20265,6 @@ type projModDecimalInt64Op struct { } func (p projModDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21148,9 +20305,9 @@ func (p projModDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21176,9 +20333,9 @@ func (p projModDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21191,7 +20348,7 @@ func (p projModDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21205,9 +20362,9 @@ func (p projModDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21229,9 +20386,9 @@ func (p projModDecimalInt64Op) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -21256,12 +20413,6 @@ type projModDecimalDecimalOp struct { } func (p projModDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21343,7 +20494,7 @@ func (p projModDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21406,12 +20557,6 @@ type projModInt16Int16Op struct { } func (p projModInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21483,7 +20628,7 @@ func (p projModInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21536,12 +20681,6 @@ type projModInt16Int32Op struct { } func (p projModInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21613,7 +20752,7 @@ func (p projModInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21666,12 +20805,6 @@ type projModInt16Int64Op struct { } func (p projModInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21743,7 +20876,7 @@ func (p projModInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21796,12 +20929,6 @@ type projModInt16DecimalOp struct { } func (p projModInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -21842,9 +20969,9 @@ func (p projModInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -21871,9 +20998,9 @@ func (p projModInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -21887,7 +21014,7 @@ func (p projModInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -21901,9 +21028,9 @@ func (p projModInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -21926,9 +21053,9 @@ func (p projModInt16DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -21954,12 +21081,6 @@ type projModInt32Int16Op struct { } func (p projModInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22031,7 +21152,7 @@ func (p projModInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22084,12 +21205,6 @@ type projModInt32Int32Op struct { } func (p projModInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22161,7 +21276,7 @@ func (p projModInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22214,12 +21329,6 @@ type projModInt32Int64Op struct { } func (p projModInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22291,7 +21400,7 @@ func (p projModInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22344,12 +21453,6 @@ type projModInt32DecimalOp struct { } func (p projModInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22390,9 +21493,9 @@ func (p projModInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22419,9 +21522,9 @@ func (p projModInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22435,7 +21538,7 @@ func (p projModInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22449,9 +21552,9 @@ func (p projModInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22474,9 +21577,9 @@ func (p projModInt32DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22502,12 +21605,6 @@ type projModInt64Int16Op struct { } func (p projModInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22579,7 +21676,7 @@ func (p projModInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22632,12 +21729,6 @@ type projModInt64Int32Op struct { } func (p projModInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22709,7 +21800,7 @@ func (p projModInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22762,12 +21853,6 @@ type projModInt64Int64Op struct { } func (p projModInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22839,7 +21924,7 @@ func (p projModInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22892,12 +21977,6 @@ type projModInt64DecimalOp struct { } func (p projModInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -22938,9 +22017,9 @@ func (p projModInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22967,9 +22046,9 @@ func (p projModInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -22983,7 +22062,7 @@ func (p projModInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -22997,9 +22076,9 @@ func (p projModInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -23022,9 +22101,9 @@ func (p projModInt64DecimalOp) Next() coldata.Batch { colexecerror.ExpectedError(tree.ErrDivByZero) } - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.HighPrecisionCtx.Rem(&projCol[i], tmpDec, &arg2) + _, err := tree.HighPrecisionCtx.Rem(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -23050,12 +22129,6 @@ type projModFloat64Float64Op struct { } func (p projModFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23131,7 +22204,7 @@ func (p projModFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23188,12 +22261,6 @@ type projPowDecimalInt16Op struct { } func (p projPowDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23230,9 +22297,9 @@ func (p projPowDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23254,9 +22321,9 @@ func (p projPowDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23269,7 +22336,7 @@ func (p projPowDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23279,9 +22346,9 @@ func (p projPowDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23299,9 +22366,9 @@ func (p projPowDecimalInt16Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23326,12 +22393,6 @@ type projPowDecimalInt32Op struct { } func (p projPowDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23368,9 +22429,9 @@ func (p projPowDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23392,9 +22453,9 @@ func (p projPowDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23407,7 +22468,7 @@ func (p projPowDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23417,9 +22478,9 @@ func (p projPowDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23437,9 +22498,9 @@ func (p projPowDecimalInt32Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23464,12 +22525,6 @@ type projPowDecimalInt64Op struct { } func (p projPowDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23506,9 +22561,9 @@ func (p projPowDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23530,9 +22585,9 @@ func (p projPowDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23545,7 +22600,7 @@ func (p projPowDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23555,9 +22610,9 @@ func (p projPowDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23575,9 +22630,9 @@ func (p projPowDecimalInt64Op) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, tmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&projCol[i], &arg1, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -23602,12 +22657,6 @@ type projPowDecimalDecimalOp struct { } func (p projPowDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23681,7 +22730,7 @@ func (p projPowDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23736,12 +22785,6 @@ type projPowInt16Int16Op struct { } func (p projPowInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23777,10 +22820,10 @@ func (p projPowInt16Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23806,10 +22849,10 @@ func (p projPowInt16Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23827,7 +22870,7 @@ func (p projPowInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23836,10 +22879,10 @@ func (p projPowInt16Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23861,10 +22904,10 @@ func (p projPowInt16Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23894,12 +22937,6 @@ type projPowInt16Int32Op struct { } func (p projPowInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -23935,10 +22972,10 @@ func (p projPowInt16Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23964,10 +23001,10 @@ func (p projPowInt16Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -23985,7 +23022,7 @@ func (p projPowInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -23994,10 +23031,10 @@ func (p projPowInt16Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24019,10 +23056,10 @@ func (p projPowInt16Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24052,12 +23089,6 @@ type projPowInt16Int64Op struct { } func (p projPowInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24093,10 +23124,10 @@ func (p projPowInt16Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24122,10 +23153,10 @@ func (p projPowInt16Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24143,7 +23174,7 @@ func (p projPowInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24152,10 +23183,10 @@ func (p projPowInt16Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24177,10 +23208,10 @@ func (p projPowInt16Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24210,12 +23241,6 @@ type projPowInt16DecimalOp struct { } func (p projPowInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24252,9 +23277,9 @@ func (p projPowInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24277,9 +23302,9 @@ func (p projPowInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24293,7 +23318,7 @@ func (p projPowInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24303,9 +23328,9 @@ func (p projPowInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24324,9 +23349,9 @@ func (p projPowInt16DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24352,12 +23377,6 @@ type projPowInt32Int16Op struct { } func (p projPowInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24393,10 +23412,10 @@ func (p projPowInt32Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24422,10 +23441,10 @@ func (p projPowInt32Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24443,7 +23462,7 @@ func (p projPowInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24452,10 +23471,10 @@ func (p projPowInt32Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24477,10 +23496,10 @@ func (p projPowInt32Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24510,12 +23529,6 @@ type projPowInt32Int32Op struct { } func (p projPowInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24551,10 +23564,10 @@ func (p projPowInt32Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24580,10 +23593,10 @@ func (p projPowInt32Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24601,7 +23614,7 @@ func (p projPowInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24610,10 +23623,10 @@ func (p projPowInt32Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24635,10 +23648,10 @@ func (p projPowInt32Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24668,12 +23681,6 @@ type projPowInt32Int64Op struct { } func (p projPowInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24709,10 +23716,10 @@ func (p projPowInt32Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24738,10 +23745,10 @@ func (p projPowInt32Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24759,7 +23766,7 @@ func (p projPowInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24768,10 +23775,10 @@ func (p projPowInt32Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24793,10 +23800,10 @@ func (p projPowInt32Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -24826,12 +23833,6 @@ type projPowInt32DecimalOp struct { } func (p projPowInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -24868,9 +23869,9 @@ func (p projPowInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24893,9 +23894,9 @@ func (p projPowInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24909,7 +23910,7 @@ func (p projPowInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -24919,9 +23920,9 @@ func (p projPowInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24940,9 +23941,9 @@ func (p projPowInt32DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -24968,12 +23969,6 @@ type projPowInt64Int16Op struct { } func (p projPowInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25009,10 +24004,10 @@ func (p projPowInt64Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25038,10 +24033,10 @@ func (p projPowInt64Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25059,7 +24054,7 @@ func (p projPowInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25068,10 +24063,10 @@ func (p projPowInt64Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25093,10 +24088,10 @@ func (p projPowInt64Int16Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25126,12 +24121,6 @@ type projPowInt64Int32Op struct { } func (p projPowInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25167,10 +24156,10 @@ func (p projPowInt64Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25196,10 +24185,10 @@ func (p projPowInt64Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25217,7 +24206,7 @@ func (p projPowInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25226,10 +24215,10 @@ func (p projPowInt64Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25251,10 +24240,10 @@ func (p projPowInt64Int32Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25284,12 +24273,6 @@ type projPowInt64Int64Op struct { } func (p projPowInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25325,10 +24308,10 @@ func (p projPowInt64Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25354,10 +24337,10 @@ func (p projPowInt64Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25375,7 +24358,7 @@ func (p projPowInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25384,10 +24367,10 @@ func (p projPowInt64Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25409,10 +24392,10 @@ func (p projPowInt64Int64Op) Next() coldata.Batch { arg2 := col2.Get(i) { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64(int64(arg1))) rightTmpDec.SetInt64(int64(int64(arg2))) - if _, err := tree.DecimalCtx.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.DecimalCtx.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -25442,12 +24425,6 @@ type projPowInt64DecimalOp struct { } func (p projPowInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25484,9 +24461,9 @@ func (p projPowInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -25509,9 +24486,9 @@ func (p projPowInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -25525,7 +24502,7 @@ func (p projPowInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25535,9 +24512,9 @@ func (p projPowInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -25556,9 +24533,9 @@ func (p projPowInt64DecimalOp) Next() coldata.Batch { { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - _, err := tree.DecimalCtx.Pow(&projCol[i], tmpDec, &arg2) + _, err := tree.DecimalCtx.Pow(&projCol[i], &tmpDec, &arg2) if err != nil { colexecerror.ExpectedError(err) } @@ -25584,12 +24561,6 @@ type projPowFloat64Float64Op struct { } func (p projPowFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25657,7 +24628,7 @@ func (p projPowFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25706,12 +24677,6 @@ type projConcatBytesBytesOp struct { } func (p projConcatBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25781,7 +24746,7 @@ func (p projConcatBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25832,12 +24797,6 @@ type projConcatJSONJSONOp struct { } func (p projConcatJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -25905,7 +24864,7 @@ func (p projConcatJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -25951,15 +24910,14 @@ func (p projConcatJSONJSONOp) Next() coldata.Batch { type projConcatDatumDatumOp struct { projOpBase + execgen.BinaryOverloadHelper } func (p projConcatDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26033,7 +24991,7 @@ func (p projConcatDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26088,12 +25046,6 @@ type projLShiftInt16Int16Op struct { } func (p projLShiftInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26167,7 +25119,7 @@ func (p projLShiftInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26222,12 +25174,6 @@ type projLShiftInt16Int32Op struct { } func (p projLShiftInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26301,7 +25247,7 @@ func (p projLShiftInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26356,12 +25302,6 @@ type projLShiftInt16Int64Op struct { } func (p projLShiftInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26435,7 +25375,7 @@ func (p projLShiftInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26490,12 +25430,6 @@ type projLShiftInt32Int16Op struct { } func (p projLShiftInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26569,7 +25503,7 @@ func (p projLShiftInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26624,12 +25558,6 @@ type projLShiftInt32Int32Op struct { } func (p projLShiftInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26703,7 +25631,7 @@ func (p projLShiftInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26758,12 +25686,6 @@ type projLShiftInt32Int64Op struct { } func (p projLShiftInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26837,7 +25759,7 @@ func (p projLShiftInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -26892,12 +25814,6 @@ type projLShiftInt64Int16Op struct { } func (p projLShiftInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -26971,7 +25887,7 @@ func (p projLShiftInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27026,12 +25942,6 @@ type projLShiftInt64Int32Op struct { } func (p projLShiftInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27105,7 +26015,7 @@ func (p projLShiftInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27160,12 +26070,6 @@ type projLShiftInt64Int64Op struct { } func (p projLShiftInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27239,7 +26143,7 @@ func (p projLShiftInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27291,15 +26195,14 @@ func (p projLShiftInt64Int64Op) Next() coldata.Batch { type projLShiftDatumInt16Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projLShiftDatumInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27382,7 +26285,7 @@ func (p projLShiftDatumInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27443,15 +26346,14 @@ func (p projLShiftDatumInt16Op) Next() coldata.Batch { type projLShiftDatumInt32Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projLShiftDatumInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27534,7 +26436,7 @@ func (p projLShiftDatumInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27595,15 +26497,14 @@ func (p projLShiftDatumInt32Op) Next() coldata.Batch { type projLShiftDatumInt64Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projLShiftDatumInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27686,7 +26587,7 @@ func (p projLShiftDatumInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27750,12 +26651,6 @@ type projRShiftInt16Int16Op struct { } func (p projRShiftInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27829,7 +26724,7 @@ func (p projRShiftInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -27884,12 +26779,6 @@ type projRShiftInt16Int32Op struct { } func (p projRShiftInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -27963,7 +26852,7 @@ func (p projRShiftInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28018,12 +26907,6 @@ type projRShiftInt16Int64Op struct { } func (p projRShiftInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28097,7 +26980,7 @@ func (p projRShiftInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28152,12 +27035,6 @@ type projRShiftInt32Int16Op struct { } func (p projRShiftInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28231,7 +27108,7 @@ func (p projRShiftInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28286,12 +27163,6 @@ type projRShiftInt32Int32Op struct { } func (p projRShiftInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28365,7 +27236,7 @@ func (p projRShiftInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28420,12 +27291,6 @@ type projRShiftInt32Int64Op struct { } func (p projRShiftInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28499,7 +27364,7 @@ func (p projRShiftInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28554,12 +27419,6 @@ type projRShiftInt64Int16Op struct { } func (p projRShiftInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28633,7 +27492,7 @@ func (p projRShiftInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28688,12 +27547,6 @@ type projRShiftInt64Int32Op struct { } func (p projRShiftInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28767,7 +27620,7 @@ func (p projRShiftInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28822,12 +27675,6 @@ type projRShiftInt64Int64Op struct { } func (p projRShiftInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -28901,7 +27748,7 @@ func (p projRShiftInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -28953,15 +27800,14 @@ func (p projRShiftInt64Int64Op) Next() coldata.Batch { type projRShiftDatumInt16Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projRShiftDatumInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29044,7 +27890,7 @@ func (p projRShiftDatumInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29105,15 +27951,14 @@ func (p projRShiftDatumInt16Op) Next() coldata.Batch { type projRShiftDatumInt32Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projRShiftDatumInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29196,7 +28041,7 @@ func (p projRShiftDatumInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29257,15 +28102,14 @@ func (p projRShiftDatumInt32Op) Next() coldata.Batch { type projRShiftDatumInt64Op struct { projOpBase + execgen.BinaryOverloadHelper } func (p projRShiftDatumInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. 
+ _overloadHelper := p.BinaryOverloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29348,7 +28192,7 @@ func (p projRShiftDatumInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29412,12 +28256,6 @@ type projJSONFetchValJSONBytesOp struct { } func (p projJSONFetchValJSONBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29497,7 +28335,7 @@ func (p projJSONFetchValJSONBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29558,12 +28396,6 @@ type projJSONFetchValJSONInt16Op struct { } func (p projJSONFetchValJSONInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29638,7 +28470,7 @@ func (p projJSONFetchValJSONInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29694,12 +28526,6 @@ type projJSONFetchValJSONInt32Op struct { } func (p projJSONFetchValJSONInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29774,7 +28600,7 @@ func (p projJSONFetchValJSONInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29830,12 +28656,6 @@ type projJSONFetchValJSONInt64Op struct { } func (p projJSONFetchValJSONInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -29910,7 +28730,7 @@ func (p projJSONFetchValJSONInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -29966,12 +28786,6 @@ type projJSONFetchTextJSONBytesOp struct { } func (p projJSONFetchTextJSONBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30069,7 +28883,7 @@ func (p projJSONFetchTextJSONBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30148,12 +28962,6 @@ type projJSONFetchTextJSONInt16Op struct { } func (p projJSONFetchTextJSONInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30246,7 +29054,7 @@ func (p projJSONFetchTextJSONInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30320,12 +29128,6 @@ type projJSONFetchTextJSONInt32Op struct { } func (p projJSONFetchTextJSONInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30418,7 +29220,7 @@ func (p projJSONFetchTextJSONInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30492,12 +29294,6 @@ type projJSONFetchTextJSONInt64Op struct { } func (p projJSONFetchTextJSONInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30590,7 +29386,7 @@ func (p projJSONFetchTextJSONInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30664,12 +29460,6 @@ type projJSONFetchValPathJSONDatumOp struct { } func (p projJSONFetchValPathJSONDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30743,7 +29533,7 @@ func (p projJSONFetchValPathJSONDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30798,12 +29588,6 @@ type projJSONFetchTextPathJSONDatumOp struct { } func (p projJSONFetchTextPathJSONDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -30897,7 +29681,7 @@ func (p projJSONFetchTextPathJSONDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -30972,12 +29756,6 @@ type projEQBoolBoolOp struct { } func (p projEQBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31063,7 +29841,7 @@ func (p projEQBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31130,12 +29908,6 @@ type projEQBytesBytesOp struct { } func (p projEQBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31203,7 +29975,7 @@ func (p projEQBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31252,12 +30024,6 @@ type projEQDecimalInt16Op struct { } func (p projEQDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31296,9 +30062,9 @@ func (p projEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31323,9 +30089,9 @@ func (p projEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31339,7 +30105,7 @@ func (p projEQDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. 
_outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31351,9 +30117,9 @@ func (p projEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31374,9 +30140,9 @@ func (p projEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31402,12 +30168,6 @@ type projEQDecimalInt32Op struct { } func (p projEQDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31446,9 +30206,9 @@ func (p projEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31473,9 +30233,9 @@ func (p projEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31489,7 +30249,7 @@ func (p projEQDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31501,9 +30261,9 @@ func (p projEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31524,9 +30284,9 @@ func (p projEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31552,12 +30312,6 @@ type projEQDecimalInt64Op struct { } func (p projEQDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31596,9 +30350,9 @@ func (p projEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31623,9 +30377,9 @@ func (p projEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31639,7 +30393,7 @@ func (p projEQDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31651,9 +30405,9 @@ func (p projEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31674,9 +30428,9 @@ func (p projEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31702,12 +30456,6 @@ type projEQDecimalFloat64Op struct { } func (p projEQDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31746,11 +30494,11 @@ func (p projEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31775,11 +30523,11 @@ func (p projEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31793,7 +30541,7 @@ func (p projEQDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31805,11 +30553,11 @@ func (p projEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31830,11 +30578,11 @@ func (p projEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult == 0 @@ -31860,12 +30608,6 @@ type projEQDecimalDecimalOp struct { } func (p projEQDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -31935,7 +30677,7 @@ func (p projEQDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -31986,12 +30728,6 @@ type projEQInt16Int16Op struct { } func (p projEQInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32083,7 +30819,7 @@ func (p projEQInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32156,12 +30892,6 @@ type projEQInt16Int32Op struct { } func (p projEQInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32253,7 +30983,7 @@ func (p projEQInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32326,12 +31056,6 @@ type projEQInt16Int64Op struct { } func (p projEQInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32423,7 +31147,7 @@ func (p projEQInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32496,12 +31220,6 @@ type projEQInt16Float64Op struct { } func (p projEQInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32609,7 +31327,7 @@ func (p projEQInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32698,12 +31416,6 @@ type projEQInt16DecimalOp struct { } func (p projEQInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32742,9 +31454,9 @@ func (p projEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -32769,9 +31481,9 @@ func (p projEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -32785,7 +31497,7 @@ func (p projEQInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -32797,9 +31509,9 @@ func (p projEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -32820,9 +31532,9 @@ func (p projEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -32848,12 +31560,6 @@ type projEQInt32Int16Op struct { } func (p projEQInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -32945,7 +31651,7 @@ func (p projEQInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33018,12 +31724,6 @@ type projEQInt32Int32Op struct { } func (p projEQInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33115,7 +31815,7 @@ func (p projEQInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33188,12 +31888,6 @@ type projEQInt32Int64Op struct { } func (p projEQInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33285,7 +31979,7 @@ func (p projEQInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33358,12 +32052,6 @@ type projEQInt32Float64Op struct { } func (p projEQInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33471,7 +32159,7 @@ func (p projEQInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33560,12 +32248,6 @@ type projEQInt32DecimalOp struct { } func (p projEQInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33604,9 +32286,9 @@ func (p projEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -33631,9 +32313,9 @@ func (p projEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -33647,7 +32329,7 @@ func (p projEQInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33659,9 +32341,9 @@ func (p projEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -33682,9 +32364,9 @@ func (p projEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -33710,12 +32392,6 @@ type projEQInt64Int16Op struct { } func (p projEQInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33807,7 +32483,7 @@ func (p projEQInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -33880,12 +32556,6 @@ type projEQInt64Int32Op struct { } func (p projEQInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -33977,7 +32647,7 @@ func (p projEQInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34050,12 +32720,6 @@ type projEQInt64Int64Op struct { } func (p projEQInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34147,7 +32811,7 @@ func (p projEQInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34220,12 +32884,6 @@ type projEQInt64Float64Op struct { } func (p projEQInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34333,7 +32991,7 @@ func (p projEQInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34422,12 +33080,6 @@ type projEQInt64DecimalOp struct { } func (p projEQInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34466,9 +33118,9 @@ func (p projEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -34493,9 +33145,9 @@ func (p projEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -34509,7 +33161,7 @@ func (p projEQInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34521,9 +33173,9 @@ func (p projEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -34544,9 +33196,9 @@ func (p projEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -34572,12 +33224,6 @@ type projEQFloat64Int16Op struct { } func (p projEQFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34685,7 +33331,7 @@ func (p projEQFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34774,12 +33420,6 @@ type projEQFloat64Int32Op struct { } func (p projEQFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -34887,7 +33527,7 @@ func (p projEQFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -34976,12 +33616,6 @@ type projEQFloat64Int64Op struct { } func (p projEQFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35089,7 +33723,7 @@ func (p projEQFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35178,12 +33812,6 @@ type projEQFloat64Float64Op struct { } func (p projEQFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35291,7 +33919,7 @@ func (p projEQFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35380,12 +34008,6 @@ type projEQFloat64DecimalOp struct { } func (p projEQFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35424,11 +34046,11 @@ func (p projEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -35453,11 +34075,11 @@ func (p projEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -35471,7 +34093,7 @@ func (p projEQFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35483,11 +34105,11 @@ func (p projEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -35508,11 +34130,11 @@ func (p projEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult == 0 @@ -35538,12 +34160,6 @@ type projEQTimestampTimestampOp struct { } func (p projEQTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35627,7 +34243,7 @@ func (p projEQTimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35692,12 +34308,6 @@ type projEQIntervalIntervalOp struct { } func (p projEQIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35767,7 +34377,7 @@ func (p projEQIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35818,12 +34428,6 @@ type projEQJSONJSONOp struct { } func (p projEQJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -35903,7 +34507,7 @@ func (p projEQJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -35964,12 +34568,6 @@ type projEQDatumDatumOp struct { } func (p projEQDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36041,7 +34639,7 @@ func (p projEQDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36094,12 +34692,6 @@ type projNEBoolBoolOp struct { } func (p projNEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36185,7 +34777,7 @@ func (p projNEBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36252,12 +34844,6 @@ type projNEBytesBytesOp struct { } func (p projNEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36325,7 +34911,7 @@ func (p projNEBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36374,12 +34960,6 @@ type projNEDecimalInt16Op struct { } func (p projNEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36418,9 +34998,9 @@ func (p projNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36445,9 +35025,9 @@ func (p projNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36461,7 +35041,7 @@ func (p projNEDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36473,9 +35053,9 @@ func (p projNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36496,9 +35076,9 @@ func (p projNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36524,12 +35104,6 @@ type projNEDecimalInt32Op struct { } func (p projNEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36568,9 +35142,9 @@ func (p projNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36595,9 +35169,9 @@ func (p projNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36611,7 +35185,7 @@ func (p projNEDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36623,9 +35197,9 @@ func (p projNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36646,9 +35220,9 @@ func (p projNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36674,12 +35248,6 @@ type projNEDecimalInt64Op struct { } func (p projNEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36718,9 +35286,9 @@ func (p projNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36745,9 +35313,9 @@ func (p projNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36761,7 +35329,7 @@ func (p projNEDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36773,9 +35341,9 @@ func (p projNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36796,9 +35364,9 @@ func (p projNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36824,12 +35392,6 @@ type projNEDecimalFloat64Op struct { } func (p projNEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -36868,11 +35430,11 @@ func (p projNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36897,11 +35459,11 @@ func (p projNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36915,7 +35477,7 @@ func (p projNEDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -36927,11 +35489,11 @@ func (p projNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36952,11 +35514,11 @@ func (p projNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult != 0 @@ -36982,12 +35544,6 @@ type projNEDecimalDecimalOp struct { } func (p projNEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37057,7 +35613,7 @@ func (p projNEDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37108,12 +35664,6 @@ type projNEInt16Int16Op struct { } func (p projNEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37205,7 +35755,7 @@ func (p projNEInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37278,12 +35828,6 @@ type projNEInt16Int32Op struct { } func (p projNEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37375,7 +35919,7 @@ func (p projNEInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37448,12 +35992,6 @@ type projNEInt16Int64Op struct { } func (p projNEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37545,7 +36083,7 @@ func (p projNEInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37618,12 +36156,6 @@ type projNEInt16Float64Op struct { } func (p projNEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37731,7 +36263,7 @@ func (p projNEInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37820,12 +36352,6 @@ type projNEInt16DecimalOp struct { } func (p projNEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -37864,9 +36390,9 @@ func (p projNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -37891,9 +36417,9 @@ func (p projNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -37907,7 +36433,7 @@ func (p projNEInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -37919,9 +36445,9 @@ func (p projNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -37942,9 +36468,9 @@ func (p projNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -37970,12 +36496,6 @@ type projNEInt32Int16Op struct { } func (p projNEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38067,7 +36587,7 @@ func (p projNEInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38140,12 +36660,6 @@ type projNEInt32Int32Op struct { } func (p projNEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38237,7 +36751,7 @@ func (p projNEInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38310,12 +36824,6 @@ type projNEInt32Int64Op struct { } func (p projNEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38407,7 +36915,7 @@ func (p projNEInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38480,12 +36988,6 @@ type projNEInt32Float64Op struct { } func (p projNEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38593,7 +37095,7 @@ func (p projNEInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38682,12 +37184,6 @@ type projNEInt32DecimalOp struct { } func (p projNEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38726,9 +37222,9 @@ func (p projNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -38753,9 +37249,9 @@ func (p projNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -38769,7 +37265,7 @@ func (p projNEInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -38781,9 +37277,9 @@ func (p projNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -38804,9 +37300,9 @@ func (p projNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -38832,12 +37328,6 @@ type projNEInt64Int16Op struct { } func (p projNEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -38929,7 +37419,7 @@ func (p projNEInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39002,12 +37492,6 @@ type projNEInt64Int32Op struct { } func (p projNEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39099,7 +37583,7 @@ func (p projNEInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39172,12 +37656,6 @@ type projNEInt64Int64Op struct { } func (p projNEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39269,7 +37747,7 @@ func (p projNEInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39342,12 +37820,6 @@ type projNEInt64Float64Op struct { } func (p projNEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39455,7 +37927,7 @@ func (p projNEInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39544,12 +38016,6 @@ type projNEInt64DecimalOp struct { } func (p projNEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39588,9 +38054,9 @@ func (p projNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -39615,9 +38081,9 @@ func (p projNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -39631,7 +38097,7 @@ func (p projNEInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39643,9 +38109,9 @@ func (p projNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -39666,9 +38132,9 @@ func (p projNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -39694,12 +38160,6 @@ type projNEFloat64Int16Op struct { } func (p projNEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -39807,7 +38267,7 @@ func (p projNEFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -39896,12 +38356,6 @@ type projNEFloat64Int32Op struct { } func (p projNEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40009,7 +38463,7 @@ func (p projNEFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40098,12 +38552,6 @@ type projNEFloat64Int64Op struct { } func (p projNEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40211,7 +38659,7 @@ func (p projNEFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40300,12 +38748,6 @@ type projNEFloat64Float64Op struct { } func (p projNEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40413,7 +38855,7 @@ func (p projNEFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40502,12 +38944,6 @@ type projNEFloat64DecimalOp struct { } func (p projNEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40546,11 +38982,11 @@ func (p projNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -40575,11 +39011,11 @@ func (p projNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -40593,7 +39029,7 @@ func (p projNEFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40605,11 +39041,11 @@ func (p projNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -40630,11 +39066,11 @@ func (p projNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult != 0 @@ -40660,12 +39096,6 @@ type projNETimestampTimestampOp struct { } func (p projNETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40749,7 +39179,7 @@ func (p projNETimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40814,12 +39244,6 @@ type projNEIntervalIntervalOp struct { } func (p projNEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -40889,7 +39313,7 @@ func (p projNEIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -40940,12 +39364,6 @@ type projNEJSONJSONOp struct { } func (p projNEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41025,7 +39443,7 @@ func (p projNEJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41086,12 +39504,6 @@ type projNEDatumDatumOp struct { } func (p projNEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41163,7 +39575,7 @@ func (p projNEDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41216,12 +39628,6 @@ type projLTBoolBoolOp struct { } func (p projLTBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41307,7 +39713,7 @@ func (p projLTBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41374,12 +39780,6 @@ type projLTBytesBytesOp struct { } func (p projLTBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41447,7 +39847,7 @@ func (p projLTBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41496,12 +39896,6 @@ type projLTDecimalInt16Op struct { } func (p projLTDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41540,9 +39934,9 @@ func (p projLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41567,9 +39961,9 @@ func (p projLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41583,7 +39977,7 @@ func (p projLTDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41595,9 +39989,9 @@ func (p projLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41618,9 +40012,9 @@ func (p projLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41646,12 +40040,6 @@ type projLTDecimalInt32Op struct { } func (p projLTDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41690,9 +40078,9 @@ func (p projLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41717,9 +40105,9 @@ func (p projLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41733,7 +40121,7 @@ func (p projLTDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41745,9 +40133,9 @@ func (p projLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41768,9 +40156,9 @@ func (p projLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41796,12 +40184,6 @@ type projLTDecimalInt64Op struct { } func (p projLTDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41840,9 +40222,9 @@ func (p projLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41867,9 +40249,9 @@ func (p projLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41883,7 +40265,7 @@ func (p projLTDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -41895,9 +40277,9 @@ func (p projLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41918,9 +40300,9 @@ func (p projLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -41946,12 +40328,6 @@ type projLTDecimalFloat64Op struct { } func (p projLTDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -41990,11 +40366,11 @@ func (p projLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -42019,11 +40395,11 @@ func (p projLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -42037,7 +40413,7 @@ func (p projLTDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42049,11 +40425,11 @@ func (p projLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -42074,11 +40450,11 @@ func (p projLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult < 0 @@ -42104,12 +40480,6 @@ type projLTDecimalDecimalOp struct { } func (p projLTDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42179,7 +40549,7 @@ func (p projLTDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42230,12 +40600,6 @@ type projLTInt16Int16Op struct { } func (p projLTInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42327,7 +40691,7 @@ func (p projLTInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42400,12 +40764,6 @@ type projLTInt16Int32Op struct { } func (p projLTInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42497,7 +40855,7 @@ func (p projLTInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42570,12 +40928,6 @@ type projLTInt16Int64Op struct { } func (p projLTInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42667,7 +41019,7 @@ func (p projLTInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42740,12 +41092,6 @@ type projLTInt16Float64Op struct { } func (p projLTInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42853,7 +41199,7 @@ func (p projLTInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -42942,12 +41288,6 @@ type projLTInt16DecimalOp struct { } func (p projLTInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -42986,9 +41326,9 @@ func (p projLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43013,9 +41353,9 @@ func (p projLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43029,7 +41369,7 @@ func (p projLTInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43041,9 +41381,9 @@ func (p projLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43064,9 +41404,9 @@ func (p projLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43092,12 +41432,6 @@ type projLTInt32Int16Op struct { } func (p projLTInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43189,7 +41523,7 @@ func (p projLTInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43262,12 +41596,6 @@ type projLTInt32Int32Op struct { } func (p projLTInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43359,7 +41687,7 @@ func (p projLTInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43432,12 +41760,6 @@ type projLTInt32Int64Op struct { } func (p projLTInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43529,7 +41851,7 @@ func (p projLTInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43602,12 +41924,6 @@ type projLTInt32Float64Op struct { } func (p projLTInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43715,7 +42031,7 @@ func (p projLTInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43804,12 +42120,6 @@ type projLTInt32DecimalOp struct { } func (p projLTInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -43848,9 +42158,9 @@ func (p projLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43875,9 +42185,9 @@ func (p projLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43891,7 +42201,7 @@ func (p projLTInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -43903,9 +42213,9 @@ func (p projLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43926,9 +42236,9 @@ func (p projLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -43954,12 +42264,6 @@ type projLTInt64Int16Op struct { } func (p projLTInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44051,7 +42355,7 @@ func (p projLTInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44124,12 +42428,6 @@ type projLTInt64Int32Op struct { } func (p projLTInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44221,7 +42519,7 @@ func (p projLTInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44294,12 +42592,6 @@ type projLTInt64Int64Op struct { } func (p projLTInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44391,7 +42683,7 @@ func (p projLTInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44464,12 +42756,6 @@ type projLTInt64Float64Op struct { } func (p projLTInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44577,7 +42863,7 @@ func (p projLTInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44666,12 +42952,6 @@ type projLTInt64DecimalOp struct { } func (p projLTInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44710,9 +42990,9 @@ func (p projLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -44737,9 +43017,9 @@ func (p projLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -44753,7 +43033,7 @@ func (p projLTInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -44765,9 +43045,9 @@ func (p projLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -44788,9 +43068,9 @@ func (p projLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -44816,12 +43096,6 @@ type projLTFloat64Int16Op struct { } func (p projLTFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -44929,7 +43203,7 @@ func (p projLTFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45018,12 +43292,6 @@ type projLTFloat64Int32Op struct { } func (p projLTFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45131,7 +43399,7 @@ func (p projLTFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45220,12 +43488,6 @@ type projLTFloat64Int64Op struct { } func (p projLTFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45333,7 +43595,7 @@ func (p projLTFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45422,12 +43684,6 @@ type projLTFloat64Float64Op struct { } func (p projLTFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45535,7 +43791,7 @@ func (p projLTFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45624,12 +43880,6 @@ type projLTFloat64DecimalOp struct { } func (p projLTFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45668,11 +43918,11 @@ func (p projLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -45697,11 +43947,11 @@ func (p projLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -45715,7 +43965,7 @@ func (p projLTFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45727,11 +43977,11 @@ func (p projLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -45752,11 +44002,11 @@ func (p projLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult < 0 @@ -45782,12 +44032,6 @@ type projLTTimestampTimestampOp struct { } func (p projLTTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -45871,7 +44115,7 @@ func (p projLTTimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -45936,12 +44180,6 @@ type projLTIntervalIntervalOp struct { } func (p projLTIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46011,7 +44249,7 @@ func (p projLTIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46062,12 +44300,6 @@ type projLTJSONJSONOp struct { } func (p projLTJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46147,7 +44379,7 @@ func (p projLTJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46208,12 +44440,6 @@ type projLTDatumDatumOp struct { } func (p projLTDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46285,7 +44511,7 @@ func (p projLTDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46338,12 +44564,6 @@ type projLEBoolBoolOp struct { } func (p projLEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46429,7 +44649,7 @@ func (p projLEBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46496,12 +44716,6 @@ type projLEBytesBytesOp struct { } func (p projLEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46569,7 +44783,7 @@ func (p projLEBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46618,12 +44832,6 @@ type projLEDecimalInt16Op struct { } func (p projLEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46662,9 +44870,9 @@ func (p projLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46689,9 +44897,9 @@ func (p projLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46705,7 +44913,7 @@ func (p projLEDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46717,9 +44925,9 @@ func (p projLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46740,9 +44948,9 @@ func (p projLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46768,12 +44976,6 @@ type projLEDecimalInt32Op struct { } func (p projLEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46812,9 +45014,9 @@ func (p projLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46839,9 +45041,9 @@ func (p projLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46855,7 +45057,7 @@ func (p projLEDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -46867,9 +45069,9 @@ func (p projLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46890,9 +45092,9 @@ func (p projLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46918,12 +45120,6 @@ type projLEDecimalInt64Op struct { } func (p projLEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -46962,9 +45158,9 @@ func (p projLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -46989,9 +45185,9 @@ func (p projLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47005,7 +45201,7 @@ func (p projLEDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47017,9 +45213,9 @@ func (p projLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47040,9 +45236,9 @@ func (p projLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47068,12 +45264,6 @@ type projLEDecimalFloat64Op struct { } func (p projLEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47112,11 +45302,11 @@ func (p projLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47141,11 +45331,11 @@ func (p projLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47159,7 +45349,7 @@ func (p projLEDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47171,11 +45361,11 @@ func (p projLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47196,11 +45386,11 @@ func (p projLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult <= 0 @@ -47226,12 +45416,6 @@ type projLEDecimalDecimalOp struct { } func (p projLEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47301,7 +45485,7 @@ func (p projLEDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47352,12 +45536,6 @@ type projLEInt16Int16Op struct { } func (p projLEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47449,7 +45627,7 @@ func (p projLEInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47522,12 +45700,6 @@ type projLEInt16Int32Op struct { } func (p projLEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47619,7 +45791,7 @@ func (p projLEInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47692,12 +45864,6 @@ type projLEInt16Int64Op struct { } func (p projLEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47789,7 +45955,7 @@ func (p projLEInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -47862,12 +46028,6 @@ type projLEInt16Float64Op struct { } func (p projLEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -47975,7 +46135,7 @@ func (p projLEInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48064,12 +46224,6 @@ type projLEInt16DecimalOp struct { } func (p projLEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48108,9 +46262,9 @@ func (p projLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -48135,9 +46289,9 @@ func (p projLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -48151,7 +46305,7 @@ func (p projLEInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48163,9 +46317,9 @@ func (p projLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -48186,9 +46340,9 @@ func (p projLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -48214,12 +46368,6 @@ type projLEInt32Int16Op struct { } func (p projLEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48311,7 +46459,7 @@ func (p projLEInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48384,12 +46532,6 @@ type projLEInt32Int32Op struct { } func (p projLEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48481,7 +46623,7 @@ func (p projLEInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48554,12 +46696,6 @@ type projLEInt32Int64Op struct { } func (p projLEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48651,7 +46787,7 @@ func (p projLEInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48724,12 +46860,6 @@ type projLEInt32Float64Op struct { } func (p projLEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48837,7 +46967,7 @@ func (p projLEInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -48926,12 +47056,6 @@ type projLEInt32DecimalOp struct { } func (p projLEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -48970,9 +47094,9 @@ func (p projLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -48997,9 +47121,9 @@ func (p projLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49013,7 +47137,7 @@ func (p projLEInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49025,9 +47149,9 @@ func (p projLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49048,9 +47172,9 @@ func (p projLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49076,12 +47200,6 @@ type projLEInt64Int16Op struct { } func (p projLEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49173,7 +47291,7 @@ func (p projLEInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49246,12 +47364,6 @@ type projLEInt64Int32Op struct { } func (p projLEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49343,7 +47455,7 @@ func (p projLEInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49416,12 +47528,6 @@ type projLEInt64Int64Op struct { } func (p projLEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49513,7 +47619,7 @@ func (p projLEInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49586,12 +47692,6 @@ type projLEInt64Float64Op struct { } func (p projLEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49699,7 +47799,7 @@ func (p projLEInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49788,12 +47888,6 @@ type projLEInt64DecimalOp struct { } func (p projLEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -49832,9 +47926,9 @@ func (p projLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49859,9 +47953,9 @@ func (p projLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49875,7 +47969,7 @@ func (p projLEInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -49887,9 +47981,9 @@ func (p projLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49910,9 +48004,9 @@ func (p projLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -49938,12 +48032,6 @@ type projLEFloat64Int16Op struct { } func (p projLEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50051,7 +48139,7 @@ func (p projLEFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50140,12 +48228,6 @@ type projLEFloat64Int32Op struct { } func (p projLEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50253,7 +48335,7 @@ func (p projLEFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50342,12 +48424,6 @@ type projLEFloat64Int64Op struct { } func (p projLEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50455,7 +48531,7 @@ func (p projLEFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50544,12 +48620,6 @@ type projLEFloat64Float64Op struct { } func (p projLEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50657,7 +48727,7 @@ func (p projLEFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50746,12 +48816,6 @@ type projLEFloat64DecimalOp struct { } func (p projLEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50790,11 +48854,11 @@ func (p projLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -50819,11 +48883,11 @@ func (p projLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -50837,7 +48901,7 @@ func (p projLEFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -50849,11 +48913,11 @@ func (p projLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -50874,11 +48938,11 @@ func (p projLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult <= 0 @@ -50904,12 +48968,6 @@ type projLETimestampTimestampOp struct { } func (p projLETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -50993,7 +49051,7 @@ func (p projLETimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51058,12 +49116,6 @@ type projLEIntervalIntervalOp struct { } func (p projLEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51133,7 +49185,7 @@ func (p projLEIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51184,12 +49236,6 @@ type projLEJSONJSONOp struct { } func (p projLEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51269,7 +49315,7 @@ func (p projLEJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51330,12 +49376,6 @@ type projLEDatumDatumOp struct { } func (p projLEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51407,7 +49447,7 @@ func (p projLEDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51460,12 +49500,6 @@ type projGTBoolBoolOp struct { } func (p projGTBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51551,7 +49585,7 @@ func (p projGTBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51618,12 +49652,6 @@ type projGTBytesBytesOp struct { } func (p projGTBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51691,7 +49719,7 @@ func (p projGTBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51740,12 +49768,6 @@ type projGTDecimalInt16Op struct { } func (p projGTDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51784,9 +49806,9 @@ func (p projGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51811,9 +49833,9 @@ func (p projGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51827,7 +49849,7 @@ func (p projGTDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51839,9 +49861,9 @@ func (p projGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51862,9 +49884,9 @@ func (p projGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51890,12 +49912,6 @@ type projGTDecimalInt32Op struct { } func (p projGTDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -51934,9 +49950,9 @@ func (p projGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51961,9 +49977,9 @@ func (p projGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -51977,7 +49993,7 @@ func (p projGTDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -51989,9 +50005,9 @@ func (p projGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52012,9 +50028,9 @@ func (p projGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52040,12 +50056,6 @@ type projGTDecimalInt64Op struct { } func (p projGTDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52084,9 +50094,9 @@ func (p projGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52111,9 +50121,9 @@ func (p projGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52127,7 +50137,7 @@ func (p projGTDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52139,9 +50149,9 @@ func (p projGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52162,9 +50172,9 @@ func (p projGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52190,12 +50200,6 @@ type projGTDecimalFloat64Op struct { } func (p projGTDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52234,11 +50238,11 @@ func (p projGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52263,11 +50267,11 @@ func (p projGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52281,7 +50285,7 @@ func (p projGTDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52293,11 +50297,11 @@ func (p projGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52318,11 +50322,11 @@ func (p projGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult > 0 @@ -52348,12 +50352,6 @@ type projGTDecimalDecimalOp struct { } func (p projGTDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52423,7 +50421,7 @@ func (p projGTDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52474,12 +50472,6 @@ type projGTInt16Int16Op struct { } func (p projGTInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52571,7 +50563,7 @@ func (p projGTInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52644,12 +50636,6 @@ type projGTInt16Int32Op struct { } func (p projGTInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52741,7 +50727,7 @@ func (p projGTInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52814,12 +50800,6 @@ type projGTInt16Int64Op struct { } func (p projGTInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -52911,7 +50891,7 @@ func (p projGTInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -52984,12 +50964,6 @@ type projGTInt16Float64Op struct { } func (p projGTInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53097,7 +51071,7 @@ func (p projGTInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53186,12 +51160,6 @@ type projGTInt16DecimalOp struct { } func (p projGTInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53230,9 +51198,9 @@ func (p projGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -53257,9 +51225,9 @@ func (p projGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -53273,7 +51241,7 @@ func (p projGTInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53285,9 +51253,9 @@ func (p projGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -53308,9 +51276,9 @@ func (p projGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -53336,12 +51304,6 @@ type projGTInt32Int16Op struct { } func (p projGTInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53433,7 +51395,7 @@ func (p projGTInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53506,12 +51468,6 @@ type projGTInt32Int32Op struct { } func (p projGTInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53603,7 +51559,7 @@ func (p projGTInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53676,12 +51632,6 @@ type projGTInt32Int64Op struct { } func (p projGTInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53773,7 +51723,7 @@ func (p projGTInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -53846,12 +51796,6 @@ type projGTInt32Float64Op struct { } func (p projGTInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -53959,7 +51903,7 @@ func (p projGTInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54048,12 +51992,6 @@ type projGTInt32DecimalOp struct { } func (p projGTInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54092,9 +52030,9 @@ func (p projGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54119,9 +52057,9 @@ func (p projGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54135,7 +52073,7 @@ func (p projGTInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54147,9 +52085,9 @@ func (p projGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54170,9 +52108,9 @@ func (p projGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54198,12 +52136,6 @@ type projGTInt64Int16Op struct { } func (p projGTInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54295,7 +52227,7 @@ func (p projGTInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54368,12 +52300,6 @@ type projGTInt64Int32Op struct { } func (p projGTInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54465,7 +52391,7 @@ func (p projGTInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54538,12 +52464,6 @@ type projGTInt64Int64Op struct { } func (p projGTInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54635,7 +52555,7 @@ func (p projGTInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54708,12 +52628,6 @@ type projGTInt64Float64Op struct { } func (p projGTInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54821,7 +52735,7 @@ func (p projGTInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -54910,12 +52824,6 @@ type projGTInt64DecimalOp struct { } func (p projGTInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -54954,9 +52862,9 @@ func (p projGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54981,9 +52889,9 @@ func (p projGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -54997,7 +52905,7 @@ func (p projGTInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55009,9 +52917,9 @@ func (p projGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -55032,9 +52940,9 @@ func (p projGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -55060,12 +52968,6 @@ type projGTFloat64Int16Op struct { } func (p projGTFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55173,7 +53075,7 @@ func (p projGTFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55262,12 +53164,6 @@ type projGTFloat64Int32Op struct { } func (p projGTFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55375,7 +53271,7 @@ func (p projGTFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55464,12 +53360,6 @@ type projGTFloat64Int64Op struct { } func (p projGTFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55577,7 +53467,7 @@ func (p projGTFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55666,12 +53556,6 @@ type projGTFloat64Float64Op struct { } func (p projGTFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55779,7 +53663,7 @@ func (p projGTFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55868,12 +53752,6 @@ type projGTFloat64DecimalOp struct { } func (p projGTFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -55912,11 +53790,11 @@ func (p projGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -55941,11 +53819,11 @@ func (p projGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -55959,7 +53837,7 @@ func (p projGTFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -55971,11 +53849,11 @@ func (p projGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -55996,11 +53874,11 @@ func (p projGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult > 0 @@ -56026,12 +53904,6 @@ type projGTTimestampTimestampOp struct { } func (p projGTTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56115,7 +53987,7 @@ func (p projGTTimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56180,12 +54052,6 @@ type projGTIntervalIntervalOp struct { } func (p projGTIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56255,7 +54121,7 @@ func (p projGTIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56306,12 +54172,6 @@ type projGTJSONJSONOp struct { } func (p projGTJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56391,7 +54251,7 @@ func (p projGTJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56452,12 +54312,6 @@ type projGTDatumDatumOp struct { } func (p projGTDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56529,7 +54383,7 @@ func (p projGTDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56582,12 +54436,6 @@ type projGEBoolBoolOp struct { } func (p projGEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56673,7 +54521,7 @@ func (p projGEBoolBoolOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56740,12 +54588,6 @@ type projGEBytesBytesOp struct { } func (p projGEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56813,7 +54655,7 @@ func (p projGEBytesBytesOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56862,12 +54704,6 @@ type projGEDecimalInt16Op struct { } func (p projGEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -56906,9 +54742,9 @@ func (p projGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -56933,9 +54769,9 @@ func (p projGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -56949,7 +54785,7 @@ func (p projGEDecimalInt16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -56961,9 +54797,9 @@ func (p projGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -56984,9 +54820,9 @@ func (p projGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57012,12 +54848,6 @@ type projGEDecimalInt32Op struct { } func (p projGEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57056,9 +54886,9 @@ func (p projGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57083,9 +54913,9 @@ func (p projGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57099,7 +54929,7 @@ func (p projGEDecimalInt32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57111,9 +54941,9 @@ func (p projGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57134,9 +54964,9 @@ func (p projGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57162,12 +54992,6 @@ type projGEDecimalInt64Op struct { } func (p projGEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57206,9 +55030,9 @@ func (p projGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57233,9 +55057,9 @@ func (p projGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57249,7 +55073,7 @@ func (p projGEDecimalInt64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57261,9 +55085,9 @@ func (p projGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57284,9 +55108,9 @@ func (p projGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57312,12 +55136,6 @@ type projGEDecimalFloat64Op struct { } func (p projGEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57356,11 +55174,11 @@ func (p projGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57385,11 +55203,11 @@ func (p projGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57403,7 +55221,7 @@ func (p projGEDecimalFloat64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57415,11 +55233,11 @@ func (p projGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57440,11 +55258,11 @@ func (p projGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } projCol[i] = cmpResult >= 0 @@ -57470,12 +55288,6 @@ type projGEDecimalDecimalOp struct { } func (p projGEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57545,7 +55357,7 @@ func (p projGEDecimalDecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57596,12 +55408,6 @@ type projGEInt16Int16Op struct { } func (p projGEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57693,7 +55499,7 @@ func (p projGEInt16Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57766,12 +55572,6 @@ type projGEInt16Int32Op struct { } func (p projGEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -57863,7 +55663,7 @@ func (p projGEInt16Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -57936,12 +55736,6 @@ type projGEInt16Int64Op struct { } func (p projGEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58033,7 +55827,7 @@ func (p projGEInt16Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58106,12 +55900,6 @@ type projGEInt16Float64Op struct { } func (p projGEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58219,7 +56007,7 @@ func (p projGEInt16Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58308,12 +56096,6 @@ type projGEInt16DecimalOp struct { } func (p projGEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58352,9 +56134,9 @@ func (p projGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -58379,9 +56161,9 @@ func (p projGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -58395,7 +56177,7 @@ func (p projGEInt16DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58407,9 +56189,9 @@ func (p projGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -58430,9 +56212,9 @@ func (p projGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -58458,12 +56240,6 @@ type projGEInt32Int16Op struct { } func (p projGEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58555,7 +56331,7 @@ func (p projGEInt32Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58628,12 +56404,6 @@ type projGEInt32Int32Op struct { } func (p projGEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58725,7 +56495,7 @@ func (p projGEInt32Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58798,12 +56568,6 @@ type projGEInt32Int64Op struct { } func (p projGEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -58895,7 +56659,7 @@ func (p projGEInt32Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -58968,12 +56732,6 @@ type projGEInt32Float64Op struct { } func (p projGEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59081,7 +56839,7 @@ func (p projGEInt32Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -59170,12 +56928,6 @@ type projGEInt32DecimalOp struct { } func (p projGEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59214,9 +56966,9 @@ func (p projGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -59241,9 +56993,9 @@ func (p projGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -59257,7 +57009,7 @@ func (p projGEInt32DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -59269,9 +57021,9 @@ func (p projGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -59292,9 +57044,9 @@ func (p projGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -59320,12 +57072,6 @@ type projGEInt64Int16Op struct { } func (p projGEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59417,7 +57163,7 @@ func (p projGEInt64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -59490,12 +57236,6 @@ type projGEInt64Int32Op struct { } func (p projGEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59587,7 +57327,7 @@ func (p projGEInt64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -59660,12 +57400,6 @@ type projGEInt64Int64Op struct { } func (p projGEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59757,7 +57491,7 @@ func (p projGEInt64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -59830,12 +57564,6 @@ type projGEInt64Float64Op struct { } func (p projGEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -59943,7 +57671,7 @@ func (p projGEInt64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60032,12 +57760,6 @@ type projGEInt64DecimalOp struct { } func (p projGEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -60076,9 +57798,9 @@ func (p projGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -60103,9 +57825,9 @@ func (p projGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -60119,7 +57841,7 @@ func (p projGEInt64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60131,9 +57853,9 @@ func (p projGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -60154,9 +57876,9 @@ func (p projGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -60182,12 +57904,6 @@ type projGEFloat64Int16Op struct { } func (p projGEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -60295,7 +58011,7 @@ func (p projGEFloat64Int16Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60384,12 +58100,6 @@ type projGEFloat64Int32Op struct { } func (p projGEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -60497,7 +58207,7 @@ func (p projGEFloat64Int32Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. 
// If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60586,12 +58296,6 @@ type projGEFloat64Int64Op struct { } func (p projGEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -60699,7 +58403,7 @@ func (p projGEFloat64Int64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60788,12 +58492,6 @@ type projGEFloat64Float64Op struct { } func (p projGEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -60901,7 +58599,7 @@ func (p projGEFloat64Float64Op) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -60990,12 +58688,6 @@ type projGEFloat64DecimalOp struct { } func (p projGEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -61034,11 +58726,11 @@ func (p projGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -61063,11 +58755,11 @@ func (p projGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -61081,7 +58773,7 @@ func (p projGEFloat64DecimalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -61093,11 +58785,11 @@ func (p projGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -61118,11 +58810,11 @@ func (p projGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } projCol[i] = cmpResult >= 0 @@ -61148,12 +58840,6 @@ type projGETimestampTimestampOp struct { } func (p projGETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -61237,7 +58923,7 @@ func (p projGETimestampTimestampOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -61302,12 +58988,6 @@ type projGEIntervalIntervalOp struct { } func (p projGEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -61377,7 +59057,7 @@ func (p projGEIntervalIntervalOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -61428,12 +59108,6 @@ type projGEJSONJSONOp struct { } func (p projGEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -61513,7 +59187,7 @@ func (p projGEJSONJSONOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -61574,12 +59248,6 @@ type projGEDatumDatumOp struct { } func (p projGEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -61651,7 +59319,7 @@ func (p projGEDatumDatumOp) Next() coldata.Batch { // If $hasNulls is true, union _outNulls with the set of input Nulls. // If $hasNulls is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
- projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) } else { if sel := batch.Selection(); sel != nil { sel = sel[:n] @@ -61721,7 +59389,6 @@ func GetProjectionOperator( col1Idx: col1Idx, col2Idx: col2Idx, outputIdx: outputIdx, - overloadHelper: execgen.OverloadHelper{BinFn: binFn, EvalCtx: evalCtx}, } leftType, rightType := inputTypes[col1Idx], inputTypes[col2Idx] @@ -61737,12 +59404,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt16Int16Op{projOpBase: projOpBase}, nil + op := &projBitandInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitandInt16Int32Op{projOpBase: projOpBase}, nil + op := &projBitandInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitandInt16Int64Op{projOpBase: projOpBase}, nil + op := &projBitandInt16Int64Op{projOpBase: projOpBase} + return op, nil } } case 32: @@ -61750,12 +59420,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt32Int16Op{projOpBase: projOpBase}, nil + op := &projBitandInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitandInt32Int32Op{projOpBase: projOpBase}, nil + op := &projBitandInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitandInt32Int64Op{projOpBase: projOpBase}, nil + op := &projBitandInt32Int64Op{projOpBase: projOpBase} + return op, nil } } case -1: @@ -61764,12 +59437,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitandInt64Int16Op{projOpBase: projOpBase}, nil + op := &projBitandInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitandInt64Int32Op{projOpBase: projOpBase}, nil + op := &projBitandInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitandInt64Int64Op{projOpBase: projOpBase}, nil + op := &projBitandInt64Int64Op{projOpBase: projOpBase} + return op, nil } } } @@ -61782,7 +59458,9 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projBitandDatumDatumOp{projOpBase: projOpBase}, nil + op := &projBitandDatumDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -61796,12 +59474,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt16Int16Op{projOpBase: projOpBase}, nil + op := &projBitorInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitorInt16Int32Op{projOpBase: projOpBase}, nil + op := &projBitorInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitorInt16Int64Op{projOpBase: projOpBase}, nil + op := &projBitorInt16Int64Op{projOpBase: projOpBase} + return op, nil } } case 32: @@ -61809,12 +59490,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt32Int16Op{projOpBase: projOpBase}, nil + op := &projBitorInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitorInt32Int32Op{projOpBase: projOpBase}, nil + op := &projBitorInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitorInt32Int64Op{projOpBase: projOpBase}, nil + op := &projBitorInt32Int64Op{projOpBase: projOpBase} + return op, nil } } case -1: @@ -61823,12 +59507,15 @@ 
func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitorInt64Int16Op{projOpBase: projOpBase}, nil + op := &projBitorInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitorInt64Int32Op{projOpBase: projOpBase}, nil + op := &projBitorInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitorInt64Int64Op{projOpBase: projOpBase}, nil + op := &projBitorInt64Int64Op{projOpBase: projOpBase} + return op, nil } } } @@ -61841,7 +59528,9 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projBitorDatumDatumOp{projOpBase: projOpBase}, nil + op := &projBitorDatumDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -61855,12 +59544,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt16Int16Op{projOpBase: projOpBase}, nil + op := &projBitxorInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitxorInt16Int32Op{projOpBase: projOpBase}, nil + op := &projBitxorInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitxorInt16Int64Op{projOpBase: projOpBase}, nil + op := &projBitxorInt16Int64Op{projOpBase: projOpBase} + return op, nil } } case 32: @@ -61868,12 +59560,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt32Int16Op{projOpBase: projOpBase}, nil + op := &projBitxorInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitxorInt32Int32Op{projOpBase: projOpBase}, nil + op := &projBitxorInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitxorInt32Int64Op{projOpBase: projOpBase}, nil + op := &projBitxorInt32Int64Op{projOpBase: projOpBase} + return op, nil } } case -1: @@ -61882,12 +59577,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projBitxorInt64Int16Op{projOpBase: projOpBase}, nil + op := &projBitxorInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projBitxorInt64Int32Op{projOpBase: projOpBase}, nil + op := &projBitxorInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projBitxorInt64Int64Op{projOpBase: projOpBase}, nil + op := &projBitxorInt64Int64Op{projOpBase: projOpBase} + return op, nil } } } @@ -61900,7 +59598,9 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projBitxorDatumDatumOp{projOpBase: projOpBase}, nil + op := &projBitxorDatumDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -61915,18 +59615,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projPlusDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPlusDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projPlusDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPlusDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projPlusDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusDecimalDecimalOp{projOpBase: projOpBase}, nil + op := 
&projPlusDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -61937,24 +59641,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt16Int16Op{projOpBase: projOpBase}, nil + op := &projPlusInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPlusInt16Int32Op{projOpBase: projOpBase}, nil + op := &projPlusInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPlusInt16Int64Op{projOpBase: projOpBase}, nil + op := &projPlusInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projPlusInt16DecimalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt16DatumOp{projOpBase: projOpBase}, nil + op := &projPlusInt16DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -61962,24 +59672,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt32Int16Op{projOpBase: projOpBase}, nil + op := &projPlusInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPlusInt32Int32Op{projOpBase: projOpBase}, nil + op := &projPlusInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPlusInt32Int64Op{projOpBase: projOpBase}, nil + op := &projPlusInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projPlusInt32DecimalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt32DatumOp{projOpBase: projOpBase}, nil + op := &projPlusInt32DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -61988,24 +59704,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusInt64Int16Op{projOpBase: projOpBase}, nil + op := &projPlusInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPlusInt64Int32Op{projOpBase: projOpBase}, nil + op := &projPlusInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPlusInt64Int64Op{projOpBase: projOpBase}, nil + op := &projPlusInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projPlusInt64DecimalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusInt64DatumOp{projOpBase: projOpBase}, nil + op := &projPlusInt64DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62018,7 +59740,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projPlusFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projPlusFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62031,7 +59754,8 @@ func GetProjectionOperator( 
switch rightType.Width() { case -1: default: - return &projPlusTimestampIntervalOp{projOpBase: projOpBase}, nil + op := &projPlusTimestampIntervalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62044,19 +59768,23 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projPlusIntervalTimestampOp{projOpBase: projOpBase}, nil + op := &projPlusIntervalTimestampOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalIntervalOp{projOpBase: projOpBase}, nil + op := &projPlusIntervalIntervalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projPlusIntervalDatumOp{projOpBase: projOpBase}, nil + op := &projPlusIntervalDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62069,17 +59797,25 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projPlusDatumIntervalOp{projOpBase: projOpBase}, nil + op := &projPlusDatumIntervalOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projPlusDatumInt16Op{projOpBase: projOpBase}, nil + op := &projPlusDatumInt16Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projPlusDatumInt32Op{projOpBase: projOpBase}, nil + op := &projPlusDatumInt32Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projPlusDatumInt64Op{projOpBase: projOpBase}, nil + op := &projPlusDatumInt64Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62094,18 +59830,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projMinusDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMinusDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projMinusDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMinusDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projMinusDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projMinusDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62116,24 +59856,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt16Int16Op{projOpBase: projOpBase}, nil + op := &projMinusInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMinusInt16Int32Op{projOpBase: projOpBase}, nil + op := &projMinusInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMinusInt16Int64Op{projOpBase: projOpBase}, nil + op := &projMinusInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projMinusInt16DecimalOp{projOpBase: 
projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt16DatumOp{projOpBase: projOpBase}, nil + op := &projMinusInt16DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case 32: @@ -62141,24 +59887,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt32Int16Op{projOpBase: projOpBase}, nil + op := &projMinusInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMinusInt32Int32Op{projOpBase: projOpBase}, nil + op := &projMinusInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMinusInt32Int64Op{projOpBase: projOpBase}, nil + op := &projMinusInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projMinusInt32DecimalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt32DatumOp{projOpBase: projOpBase}, nil + op := &projMinusInt32DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } case -1: @@ -62167,24 +59919,30 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusInt64Int16Op{projOpBase: projOpBase}, nil + op := &projMinusInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMinusInt64Int32Op{projOpBase: projOpBase}, nil + op := &projMinusInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMinusInt64Int64Op{projOpBase: projOpBase}, nil + op := &projMinusInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projMinusInt64DecimalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return &projMinusInt64DatumOp{projOpBase: projOpBase}, nil + op := &projMinusInt64DatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62197,7 +59955,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMinusFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projMinusFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62210,13 +59969,15 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMinusTimestampTimestampOp{projOpBase: projOpBase}, nil + op := &projMinusTimestampTimestampOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusTimestampIntervalOp{projOpBase: projOpBase}, nil + op := &projMinusTimestampIntervalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62229,13 +59990,16 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMinusIntervalIntervalOp{projOpBase: projOpBase}, nil + op := &projMinusIntervalIntervalOp{projOpBase: projOpBase} + return op, nil } case typeconv.DatumVecCanonicalTypeFamily: switch rightType.Width() { case -1: default: - return 
&projMinusIntervalDatumOp{projOpBase: projOpBase}, nil + op := &projMinusIntervalDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62248,17 +60012,21 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMinusJSONBytesOp{projOpBase: projOpBase}, nil + op := &projMinusJSONBytesOp{projOpBase: projOpBase} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusJSONInt16Op{projOpBase: projOpBase}, nil + op := &projMinusJSONInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMinusJSONInt32Op{projOpBase: projOpBase}, nil + op := &projMinusJSONInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMinusJSONInt64Op{projOpBase: projOpBase}, nil + op := &projMinusJSONInt64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62271,29 +60039,41 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMinusDatumDatumOp{projOpBase: projOpBase}, nil + op := &projMinusDatumDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumIntervalOp{projOpBase: projOpBase}, nil + op := &projMinusDatumIntervalOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.BytesFamily: switch rightType.Width() { case -1: default: - return &projMinusDatumBytesOp{projOpBase: projOpBase}, nil + op := &projMinusDatumBytesOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projMinusDatumInt16Op{projOpBase: projOpBase}, nil + op := &projMinusDatumInt16Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projMinusDatumInt32Op{projOpBase: projOpBase}, nil + op := &projMinusDatumInt32Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projMinusDatumInt64Op{projOpBase: projOpBase}, nil + op := &projMinusDatumInt64Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62308,24 +60088,29 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projMultDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMultDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projMultDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMultDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projMultDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projMultDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultDecimalIntervalOp{projOpBase: projOpBase}, nil + op := 
&projMultDecimalIntervalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62336,24 +60121,29 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt16Int16Op{projOpBase: projOpBase}, nil + op := &projMultInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMultInt16Int32Op{projOpBase: projOpBase}, nil + op := &projMultInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMultInt16Int64Op{projOpBase: projOpBase}, nil + op := &projMultInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projMultInt16DecimalOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt16IntervalOp{projOpBase: projOpBase}, nil + op := &projMultInt16IntervalOp{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62361,24 +60151,29 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt32Int16Op{projOpBase: projOpBase}, nil + op := &projMultInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMultInt32Int32Op{projOpBase: projOpBase}, nil + op := &projMultInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMultInt32Int64Op{projOpBase: projOpBase}, nil + op := &projMultInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projMultInt32DecimalOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt32IntervalOp{projOpBase: projOpBase}, nil + op := &projMultInt32IntervalOp{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62387,24 +60182,29 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projMultInt64Int16Op{projOpBase: projOpBase}, nil + op := &projMultInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMultInt64Int32Op{projOpBase: projOpBase}, nil + op := &projMultInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMultInt64Int64Op{projOpBase: projOpBase}, nil + op := &projMultInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projMultInt64DecimalOp{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultInt64IntervalOp{projOpBase: projOpBase}, nil + op := &projMultInt64IntervalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62417,13 +60217,15 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projMultFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projMultFloat64Float64Op{projOpBase: projOpBase} + return op, nil } case types.IntervalFamily: switch rightType.Width() { case -1: default: - return &projMultFloat64IntervalOp{projOpBase: projOpBase}, nil + op := &projMultFloat64IntervalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62435,24 +60237,29 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return 
&projMultIntervalInt16Op{projOpBase: projOpBase}, nil + op := &projMultIntervalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projMultIntervalInt32Op{projOpBase: projOpBase}, nil + op := &projMultIntervalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projMultIntervalInt64Op{projOpBase: projOpBase}, nil + op := &projMultIntervalInt64Op{projOpBase: projOpBase} + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalFloat64Op{projOpBase: projOpBase}, nil + op := &projMultIntervalFloat64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projMultIntervalDecimalOp{projOpBase: projOpBase}, nil + op := &projMultIntervalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62467,18 +60274,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projDivDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projDivDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projDivDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projDivDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projDivDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projDivDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62489,18 +60300,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt16Int16Op{projOpBase: projOpBase}, nil + op := &projDivInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projDivInt16Int32Op{projOpBase: projOpBase}, nil + op := &projDivInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projDivInt16Int64Op{projOpBase: projOpBase}, nil + op := &projDivInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projDivInt16DecimalOp{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62508,18 +60323,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt32Int16Op{projOpBase: projOpBase}, nil + op := &projDivInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projDivInt32Int32Op{projOpBase: projOpBase}, nil + op := &projDivInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projDivInt32Int64Op{projOpBase: projOpBase}, nil + op := &projDivInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projDivInt32DecimalOp{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62528,18 +60347,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projDivInt64Int16Op{projOpBase: projOpBase}, nil + op := &projDivInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projDivInt64Int32Op{projOpBase: projOpBase}, nil + op := &projDivInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projDivInt64Int64Op{projOpBase: 
projOpBase}, nil + op := &projDivInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projDivInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projDivInt64DecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62552,7 +60375,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projDivFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projDivFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62565,13 +60389,15 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projDivIntervalInt64Op{projOpBase: projOpBase}, nil + op := &projDivIntervalInt64Op{projOpBase: projOpBase} + return op, nil } case types.FloatFamily: switch rightType.Width() { case -1: default: - return &projDivIntervalFloat64Op{projOpBase: projOpBase}, nil + op := &projDivIntervalFloat64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62586,18 +60412,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projFloorDivDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projFloorDivDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projFloorDivDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projFloorDivDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projFloorDivDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projFloorDivDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62608,18 +60438,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt16Int16Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projFloorDivInt16Int32Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projFloorDivInt16Int64Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projFloorDivInt16DecimalOp{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62627,18 +60461,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt32Int16Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projFloorDivInt32Int32Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projFloorDivInt32Int64Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projFloorDivInt32DecimalOp{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62647,18 +60485,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projFloorDivInt64Int16Op{projOpBase: projOpBase}, nil + op := 
&projFloorDivInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projFloorDivInt64Int32Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projFloorDivInt64Int64Op{projOpBase: projOpBase}, nil + op := &projFloorDivInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projFloorDivInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projFloorDivInt64DecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62671,7 +60513,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projFloorDivFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projFloorDivFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62686,18 +60529,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projModDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projModDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projModDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projModDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projModDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projModDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62708,18 +60555,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt16Int16Op{projOpBase: projOpBase}, nil + op := &projModInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projModInt16Int32Op{projOpBase: projOpBase}, nil + op := &projModInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projModInt16Int64Op{projOpBase: projOpBase}, nil + op := &projModInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projModInt16DecimalOp{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62727,18 +60578,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt32Int16Op{projOpBase: projOpBase}, nil + op := &projModInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projModInt32Int32Op{projOpBase: projOpBase}, nil + op := &projModInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projModInt32Int64Op{projOpBase: projOpBase}, nil + op := &projModInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projModInt32DecimalOp{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62747,18 +60602,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projModInt64Int16Op{projOpBase: projOpBase}, nil + op := &projModInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projModInt64Int32Op{projOpBase: projOpBase}, nil + op := &projModInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projModInt64Int64Op{projOpBase: projOpBase}, nil + op := 
&projModInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projModInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projModInt64DecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62771,7 +60630,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projModFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projModFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62786,18 +60646,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowDecimalInt16Op{projOpBase: projOpBase}, nil + op := &projPowDecimalInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPowDecimalInt32Op{projOpBase: projOpBase}, nil + op := &projPowDecimalInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPowDecimalInt64Op{projOpBase: projOpBase}, nil + op := &projPowDecimalInt64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowDecimalDecimalOp{projOpBase: projOpBase}, nil + op := &projPowDecimalDecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62808,18 +60672,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt16Int16Op{projOpBase: projOpBase}, nil + op := &projPowInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPowInt16Int32Op{projOpBase: projOpBase}, nil + op := &projPowInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPowInt16Int64Op{projOpBase: projOpBase}, nil + op := &projPowInt16Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt16DecimalOp{projOpBase: projOpBase}, nil + op := &projPowInt16DecimalOp{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62827,18 +60695,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt32Int16Op{projOpBase: projOpBase}, nil + op := &projPowInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPowInt32Int32Op{projOpBase: projOpBase}, nil + op := &projPowInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPowInt32Int64Op{projOpBase: projOpBase}, nil + op := &projPowInt32Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt32DecimalOp{projOpBase: projOpBase}, nil + op := &projPowInt32DecimalOp{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62847,18 +60719,22 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projPowInt64Int16Op{projOpBase: projOpBase}, nil + op := &projPowInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projPowInt64Int32Op{projOpBase: projOpBase}, nil + op := &projPowInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projPowInt64Int64Op{projOpBase: projOpBase}, nil + op := &projPowInt64Int64Op{projOpBase: projOpBase} + return op, nil } case types.DecimalFamily: switch rightType.Width() { case -1: default: - return &projPowInt64DecimalOp{projOpBase: projOpBase}, nil + op := &projPowInt64DecimalOp{projOpBase: projOpBase} + return op, nil } } } @@ -62871,7 +60747,8 @@ func GetProjectionOperator( switch 
rightType.Width() { case -1: default: - return &projPowFloat64Float64Op{projOpBase: projOpBase}, nil + op := &projPowFloat64Float64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62887,7 +60764,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projConcatBytesBytesOp{projOpBase: projOpBase}, nil + op := &projConcatBytesBytesOp{projOpBase: projOpBase} + return op, nil } } } @@ -62900,7 +60778,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projConcatJSONJSONOp{projOpBase: projOpBase}, nil + op := &projConcatJSONJSONOp{projOpBase: projOpBase} + return op, nil } } } @@ -62913,7 +60792,9 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projConcatDatumDatumOp{projOpBase: projOpBase}, nil + op := &projConcatDatumDatumOp{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62927,12 +60808,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt16Int16Op{projOpBase: projOpBase}, nil + op := &projLShiftInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projLShiftInt16Int32Op{projOpBase: projOpBase}, nil + op := &projLShiftInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projLShiftInt16Int64Op{projOpBase: projOpBase}, nil + op := &projLShiftInt16Int64Op{projOpBase: projOpBase} + return op, nil } } case 32: @@ -62940,12 +60824,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt32Int16Op{projOpBase: projOpBase}, nil + op := &projLShiftInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projLShiftInt32Int32Op{projOpBase: projOpBase}, nil + op := &projLShiftInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projLShiftInt32Int64Op{projOpBase: projOpBase}, nil + op := &projLShiftInt32Int64Op{projOpBase: projOpBase} + return op, nil } } case -1: @@ -62954,12 +60841,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftInt64Int16Op{projOpBase: projOpBase}, nil + op := &projLShiftInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projLShiftInt64Int32Op{projOpBase: projOpBase}, nil + op := &projLShiftInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projLShiftInt64Int64Op{projOpBase: projOpBase}, nil + op := &projLShiftInt64Int64Op{projOpBase: projOpBase} + return op, nil } } } @@ -62971,12 +60861,18 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projLShiftDatumInt16Op{projOpBase: projOpBase}, nil + op := &projLShiftDatumInt16Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projLShiftDatumInt32Op{projOpBase: projOpBase}, nil + op := &projLShiftDatumInt32Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projLShiftDatumInt64Op{projOpBase: projOpBase}, nil + op := &projLShiftDatumInt64Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -62990,12 +60886,15 @@ func GetProjectionOperator( case types.IntFamily: switch 
rightType.Width() { case 16: - return &projRShiftInt16Int16Op{projOpBase: projOpBase}, nil + op := &projRShiftInt16Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projRShiftInt16Int32Op{projOpBase: projOpBase}, nil + op := &projRShiftInt16Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projRShiftInt16Int64Op{projOpBase: projOpBase}, nil + op := &projRShiftInt16Int64Op{projOpBase: projOpBase} + return op, nil } } case 32: @@ -63003,12 +60902,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt32Int16Op{projOpBase: projOpBase}, nil + op := &projRShiftInt32Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projRShiftInt32Int32Op{projOpBase: projOpBase}, nil + op := &projRShiftInt32Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projRShiftInt32Int64Op{projOpBase: projOpBase}, nil + op := &projRShiftInt32Int64Op{projOpBase: projOpBase} + return op, nil } } case -1: @@ -63017,12 +60919,15 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftInt64Int16Op{projOpBase: projOpBase}, nil + op := &projRShiftInt64Int16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projRShiftInt64Int32Op{projOpBase: projOpBase}, nil + op := &projRShiftInt64Int32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projRShiftInt64Int64Op{projOpBase: projOpBase}, nil + op := &projRShiftInt64Int64Op{projOpBase: projOpBase} + return op, nil } } } @@ -63034,12 +60939,18 @@ func GetProjectionOperator( case types.IntFamily: switch rightType.Width() { case 16: - return &projRShiftDatumInt16Op{projOpBase: projOpBase}, nil + op := &projRShiftDatumInt16Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case 32: - return &projRShiftDatumInt32Op{projOpBase: projOpBase}, nil + op := &projRShiftDatumInt32Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil case -1: default: - return &projRShiftDatumInt64Op{projOpBase: projOpBase}, nil + op := &projRShiftDatumInt64Op{projOpBase: projOpBase} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + return op, nil } } } @@ -63055,17 +60966,21 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValJSONBytesOp{projOpBase: projOpBase}, nil + op := &projJSONFetchValJSONBytesOp{projOpBase: projOpBase} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchValJSONInt16Op{projOpBase: projOpBase}, nil + op := &projJSONFetchValJSONInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projJSONFetchValJSONInt32Op{projOpBase: projOpBase}, nil + op := &projJSONFetchValJSONInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projJSONFetchValJSONInt64Op{projOpBase: projOpBase}, nil + op := &projJSONFetchValJSONInt64Op{projOpBase: projOpBase} + return op, nil } } } @@ -63081,17 +60996,21 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextJSONBytesOp{projOpBase: projOpBase}, nil + op := &projJSONFetchTextJSONBytesOp{projOpBase: projOpBase} + return op, nil } case types.IntFamily: switch rightType.Width() { case 16: - return &projJSONFetchTextJSONInt16Op{projOpBase: projOpBase}, nil + op := 
&projJSONFetchTextJSONInt16Op{projOpBase: projOpBase} + return op, nil case 32: - return &projJSONFetchTextJSONInt32Op{projOpBase: projOpBase}, nil + op := &projJSONFetchTextJSONInt32Op{projOpBase: projOpBase} + return op, nil case -1: default: - return &projJSONFetchTextJSONInt64Op{projOpBase: projOpBase}, nil + op := &projJSONFetchTextJSONInt64Op{projOpBase: projOpBase} + return op, nil } } } @@ -63107,7 +61026,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchValPathJSONDatumOp{projOpBase: projOpBase}, nil + op := &projJSONFetchValPathJSONDatumOp{projOpBase: projOpBase} + return op, nil } } } @@ -63123,7 +61043,8 @@ func GetProjectionOperator( switch rightType.Width() { case -1: default: - return &projJSONFetchTextPathJSONDatumOp{projOpBase: projOpBase}, nil + op := &projJSONFetchTextPathJSONDatumOp{projOpBase: projOpBase} + return op, nil } } } diff --git a/pkg/sql/colexec/colexecproj/proj_non_const_ops_tmpl.go b/pkg/sql/colexec/colexecproj/proj_non_const_ops_tmpl.go index 10f327dd2335..57c139f8148a 100644 --- a/pkg/sql/colexec/colexecproj/proj_non_const_ops_tmpl.go +++ b/pkg/sql/colexec/colexecproj/proj_non_const_ops_tmpl.go @@ -22,6 +22,7 @@ package colexecproj import ( + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -47,6 +48,7 @@ var ( _ = coldataext.CompareDatum _ sqltelemetry.EnumTelemetryType _ telemetry.Counter + _ apd.Context ) // {{/* @@ -79,35 +81,36 @@ func _ASSIGN(_, _, _, _, _, _ interface{}) { // around the problem we specify it here. type projConstOpBase struct { colexecop.OneInputHelper - allocator *colmem.Allocator - colIdx int - outputIdx int - overloadHelper execgen.OverloadHelper + allocator *colmem.Allocator + colIdx int + outputIdx int } // projOpBase contains all of the fields for non-constant projections. type projOpBase struct { colexecop.OneInputHelper - allocator *colmem.Allocator - col1Idx int - col2Idx int - outputIdx int - overloadHelper execgen.OverloadHelper + allocator *colmem.Allocator + col1Idx int + col2Idx int + outputIdx int } // {{define "projOp"}} type _OP_NAME struct { projOpBase + // {{if .NeedsBinaryOverloadHelper}} + execgen.BinaryOverloadHelper + // {{end}} } func (p _OP_NAME) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the projection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper + // {{if .NeedsBinaryOverloadHelper}} + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. + _overloadHelper := p.BinaryOverloadHelper + // {{end}} batch := p.Input.Next() n := batch.Length() if n == 0 { @@ -173,7 +176,7 @@ func _SET_PROJECTION(_HAS_NULLS bool) { // If _HAS_NULLS is false, then there are no input Nulls. _outNulls is // projVec.Nulls() so there is no need to call projVec.SetNulls(). 
// {{if _HAS_NULLS}} - projVec.SetNulls(_outNulls.Or(col1Nulls).Or(col2Nulls)) + projVec.SetNulls(_outNulls.Or(*col1Nulls).Or(*col2Nulls)) // {{end}} // {{end}} // {{end}} @@ -262,7 +265,6 @@ func GetProjectionOperator( col1Idx: col1Idx, col2Idx: col2Idx, outputIdx: outputIdx, - overloadHelper: execgen.OverloadHelper{BinFn: binFn, EvalCtx: evalCtx}, } leftType, rightType := inputTypes[col1Idx], inputTypes[col2Idx] @@ -283,7 +285,11 @@ func GetProjectionOperator( switch rightType.Width() { // {{range .RightWidths}} case _RIGHT_TYPE_WIDTH: - return &_OP_NAME{projOpBase: projOpBase}, nil + op := &_OP_NAME{projOpBase: projOpBase} + // {{if .NeedsBinaryOverloadHelper}} + op.BinaryOverloadHelper = execgen.BinaryOverloadHelper{BinFn: binFn, EvalCtx: evalCtx} + // {{end}} + return op, nil // {{end}} } // {{end}} diff --git a/pkg/sql/colexec/colexecproj/projection_ops_test.go b/pkg/sql/colexec/colexecproj/projection_ops_test.go index d6751a075aad..f93e529af332 100644 --- a/pkg/sql/colexec/colexecproj/projection_ops_test.go +++ b/pkg/sql/colexec/colexecproj/projection_ops_test.go @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -185,7 +184,7 @@ func TestRandomComparisons(t *testing.T) { rng, _ := randutil.NewTestRand() expected := make([]bool, numTuples) - var da rowenc.DatumAlloc + var da tree.DatumAlloc lDatums := make([]tree.Datum, numTuples) rDatums := make([]tree.Datum, numTuples) for _, typ := range types.Scalar { diff --git a/pkg/sql/colexec/colexecsel/BUILD.bazel b/pkg/sql/colexec/colexecsel/BUILD.bazel index 8c318da34bbe..121ab6ae5e54 100644 --- a/pkg/sql/colexec/colexecsel/BUILD.bazel +++ b/pkg/sql/colexec/colexecsel/BUILD.bazel @@ -23,7 +23,7 @@ go_library( "//pkg/sql/types", # keep "//pkg/util/duration", # keep "//pkg/util/json", # keep - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", ], ) @@ -37,6 +37,7 @@ go_test( "selection_ops_test.go", ], embed = [":colexecsel"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", diff --git a/pkg/sql/colexec/colexecsel/sel_like_ops.eg.go b/pkg/sql/colexec/colexecsel/sel_like_ops.eg.go index b42a68cd0157..f01e2d913dae 100644 --- a/pkg/sql/colexec/colexecsel/sel_like_ops.eg.go +++ b/pkg/sql/colexec/colexecsel/sel_like_ops.eg.go @@ -23,12 +23,6 @@ type selPrefixBytesBytesConstOp struct { } func (p *selPrefixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -112,12 +106,6 @@ type selSuffixBytesBytesConstOp struct { } func (p *selSuffixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
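The template hunk above is the source of the generated-operator changes earlier in the patch: only operators generated with `NeedsBinaryOverloadHelper` embed `execgen.BinaryOverloadHelper`, so `GetProjectionOperator` assigns `BinFn`/`EvalCtx` only in the datum-backed cases and every other operator drops the unused field. A toy sketch of that constructor shape, with hypothetical stand-in types rather than the real `execgen`/`colexec` ones:

```go
package main

import "fmt"

// Stand-ins for the generated types. binaryOverloadHelper plays the role of
// execgen.BinaryOverloadHelper and is embedded only by the datum-backed
// operator that actually needs a tree-level binary function at runtime.
type projOpBase struct{ col1Idx, col2Idx, outputIdx int }

type binaryOverloadHelper struct {
	binFn func(left, right interface{}) (interface{}, error)
}

type projPlusInt64Int64Op struct{ projOpBase } // native types: no helper

type projPlusDatumDatumOp struct { // datum-backed: helper embedded
	projOpBase
	binaryOverloadHelper
}

// newProjOp mirrors the two-step construction now used in GetProjectionOperator:
// build the operator from projOpBase, then set the helper only when needed.
func newProjOp(base projOpBase, datumBacked bool, binFn func(left, right interface{}) (interface{}, error)) interface{} {
	if !datumBacked {
		op := &projPlusInt64Int64Op{projOpBase: base}
		return op
	}
	op := &projPlusDatumDatumOp{projOpBase: base}
	op.binaryOverloadHelper = binaryOverloadHelper{binFn: binFn}
	return op
}

func main() {
	fmt.Printf("%T\n", newProjOp(projOpBase{0, 1, 2}, true, nil))  // *main.projPlusDatumDatumOp
	fmt.Printf("%T\n", newProjOp(projOpBase{0, 1, 2}, false, nil)) // *main.projPlusInt64Int64Op
}
```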
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -201,12 +189,6 @@ type selContainsBytesBytesConstOp struct { } func (p *selContainsBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -290,12 +272,6 @@ type selRegexpBytesBytesConstOp struct { } func (p *selRegexpBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -379,12 +355,6 @@ type selNotPrefixBytesBytesConstOp struct { } func (p *selNotPrefixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -468,12 +438,6 @@ type selNotSuffixBytesBytesConstOp struct { } func (p *selNotSuffixBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -557,12 +521,6 @@ type selNotContainsBytesBytesConstOp struct { } func (p *selNotContainsBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -646,12 +604,6 @@ type selNotRegexpBytesBytesConstOp struct { } func (p *selNotRegexpBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { diff --git a/pkg/sql/colexec/colexecsel/selection_ops.eg.go b/pkg/sql/colexec/colexecsel/selection_ops.eg.go index afa58719f194..31209f014543 100644 --- a/pkg/sql/colexec/colexecsel/selection_ops.eg.go +++ b/pkg/sql/colexec/colexecsel/selection_ops.eg.go @@ -14,13 +14,12 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexeccmp" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -42,16 +41,14 @@ var ( // constant, except for the constant itself. type selConstOpBase struct { colexecop.OneInputHelper - colIdx int - overloadHelper execgen.OverloadHelper + colIdx int } // selOpBase contains all of the fields for non-constant binary selections. type selOpBase struct { colexecop.OneInputHelper - col1Idx int - col2Idx int - overloadHelper execgen.OverloadHelper + col1Idx int + col2Idx int } type selEQBoolBoolConstOp struct { @@ -60,12 +57,6 @@ type selEQBoolBoolConstOp struct { } func (p *selEQBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -206,12 +197,6 @@ type selEQBoolBoolOp struct { } func (p *selEQBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -226,7 +211,7 @@ func (p *selEQBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -364,12 +349,6 @@ type selEQBytesBytesConstOp struct { } func (p *selEQBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -476,12 +455,6 @@ type selEQBytesBytesOp struct { } func (p *selEQBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -496,7 +469,7 @@ func (p *selEQBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -598,12 +571,6 @@ type selEQDecimalInt16ConstOp struct { } func (p *selEQDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -629,9 +596,9 @@ func (p *selEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -658,9 +625,9 @@ func (p *selEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -683,9 +650,9 @@ func (p *selEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -709,9 +676,9 @@ func (p *selEQDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -736,12 +703,6 @@ type selEQDecimalInt16Op struct { } func (p *selEQDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -756,7 +717,7 @@ func (p *selEQDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -771,9 +732,9 @@ func (p *selEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -803,9 +764,9 @@ func (p *selEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -829,9 +790,9 @@ func (p *selEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -858,9 +819,9 @@ func (p *selEQDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -886,12 +847,6 @@ type selEQDecimalInt32ConstOp struct { } func (p *selEQDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -917,9 +872,9 @@ func (p *selEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -946,9 +901,9 @@ func (p *selEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -971,9 +926,9 @@ func (p *selEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -997,9 +952,9 @@ func (p *selEQDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1024,12 +979,6 @@ type selEQDecimalInt32Op struct { } func (p *selEQDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1044,7 +993,7 @@ func (p *selEQDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -1059,9 +1008,9 @@ func (p *selEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1091,9 +1040,9 @@ func (p *selEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1117,9 +1066,9 @@ func (p *selEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1146,9 +1095,9 @@ func (p *selEQDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1174,12 +1123,6 @@ type selEQDecimalInt64ConstOp struct { } func (p *selEQDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1205,9 +1148,9 @@ func (p *selEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1234,9 +1177,9 @@ func (p *selEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1259,9 +1202,9 @@ func (p *selEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1285,9 +1228,9 @@ func (p *selEQDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1312,12 +1255,6 @@ type selEQDecimalInt64Op struct { } func (p *selEQDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1332,7 +1269,7 @@ func (p *selEQDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -1347,9 +1284,9 @@ func (p *selEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1379,9 +1316,9 @@ func (p *selEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1405,9 +1342,9 @@ func (p *selEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1434,9 +1371,9 @@ func (p *selEQDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1462,12 +1399,6 @@ type selEQDecimalFloat64ConstOp struct { } func (p *selEQDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1493,11 +1424,11 @@ func (p *selEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1524,11 +1455,11 @@ func (p *selEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1551,11 +1482,11 @@ func (p *selEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1579,11 +1510,11 @@ func (p *selEQDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult == 0 @@ -1608,12 +1539,6 @@ type selEQDecimalFloat64Op struct { } func (p *selEQDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1628,7 +1553,7 @@ func (p *selEQDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -1643,11 +1568,11 @@ func (p *selEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1677,11 +1602,11 @@ func (p *selEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1705,11 +1630,11 @@ func (p *selEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1736,11 +1661,11 @@ func (p *selEQDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult == 0 @@ -1766,12 +1691,6 @@ type selEQDecimalDecimalConstOp struct { } func (p *selEQDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1880,12 +1799,6 @@ type selEQDecimalDecimalOp struct { } func (p *selEQDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -1900,7 +1813,7 @@ func (p *selEQDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -2006,12 +1919,6 @@ type selEQInt16Int16ConstOp struct { } func (p *selEQInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. 
- _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2164,12 +2071,6 @@ type selEQInt16Int16Op struct { } func (p *selEQInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2184,7 +2085,7 @@ func (p *selEQInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -2334,12 +2235,6 @@ type selEQInt16Int32ConstOp struct { } func (p *selEQInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2492,12 +2387,6 @@ type selEQInt16Int32Op struct { } func (p *selEQInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2512,7 +2401,7 @@ func (p *selEQInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -2662,12 +2551,6 @@ type selEQInt16Int64ConstOp struct { } func (p *selEQInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2820,12 +2703,6 @@ type selEQInt16Int64Op struct { } func (p *selEQInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -2840,7 +2717,7 @@ func (p *selEQInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -2990,12 +2867,6 @@ type selEQInt16Float64ConstOp struct { } func (p *selEQInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3180,12 +3051,6 @@ type selEQInt16Float64Op struct { } func (p *selEQInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3200,7 +3065,7 @@ func (p *selEQInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -3382,12 +3247,6 @@ type selEQInt16DecimalConstOp struct { } func (p *selEQInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3413,9 +3272,9 @@ func (p *selEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -3442,9 +3301,9 @@ func (p *selEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -3467,9 +3326,9 @@ func (p *selEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -3493,9 +3352,9 @@ func (p *selEQInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -3520,12 +3379,6 @@ type selEQInt16DecimalOp struct { } func (p *selEQInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3540,7 +3393,7 @@ func (p *selEQInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -3555,9 +3408,9 @@ func (p *selEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -3587,9 +3440,9 @@ func (p *selEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -3613,9 +3466,9 @@ func (p *selEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -3642,9 +3495,9 @@ func (p *selEQInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -3670,12 +3523,6 @@ type selEQInt32Int16ConstOp struct { } func (p *selEQInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3828,12 +3675,6 @@ type selEQInt32Int16Op struct { } func (p *selEQInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -3848,7 +3689,7 @@ func (p *selEQInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -3998,12 +3839,6 @@ type selEQInt32Int32ConstOp struct { } func (p *selEQInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4156,12 +3991,6 @@ type selEQInt32Int32Op struct { } func (p *selEQInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4176,7 +4005,7 @@ func (p *selEQInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -4326,12 +4155,6 @@ type selEQInt32Int64ConstOp struct { } func (p *selEQInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4484,12 +4307,6 @@ type selEQInt32Int64Op struct { } func (p *selEQInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4504,7 +4321,7 @@ func (p *selEQInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -4654,12 +4471,6 @@ type selEQInt32Float64ConstOp struct { } func (p *selEQInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4844,12 +4655,6 @@ type selEQInt32Float64Op struct { } func (p *selEQInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -4864,7 +4669,7 @@ func (p *selEQInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -5046,12 +4851,6 @@ type selEQInt32DecimalConstOp struct { } func (p *selEQInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5077,9 +4876,9 @@ func (p *selEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -5106,9 +4905,9 @@ func (p *selEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -5131,9 +4930,9 @@ func (p *selEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -5157,9 +4956,9 @@ func (p *selEQInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -5184,12 +4983,6 @@ type selEQInt32DecimalOp struct { } func (p *selEQInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5204,7 +4997,7 @@ func (p *selEQInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -5219,9 +5012,9 @@ func (p *selEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -5251,9 +5044,9 @@ func (p *selEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -5277,9 +5070,9 @@ func (p *selEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -5306,9 +5099,9 @@ func (p *selEQInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -5334,12 +5127,6 @@ type selEQInt64Int16ConstOp struct { } func (p *selEQInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5492,12 +5279,6 @@ type selEQInt64Int16Op struct { } func (p *selEQInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5512,7 +5293,7 @@ func (p *selEQInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -5662,12 +5443,6 @@ type selEQInt64Int32ConstOp struct { } func (p *selEQInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5820,12 +5595,6 @@ type selEQInt64Int32Op struct { } func (p *selEQInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -5840,7 +5609,7 @@ func (p *selEQInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -5990,12 +5759,6 @@ type selEQInt64Int64ConstOp struct { } func (p *selEQInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6148,12 +5911,6 @@ type selEQInt64Int64Op struct { } func (p *selEQInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6168,7 +5925,7 @@ func (p *selEQInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -6318,12 +6075,6 @@ type selEQInt64Float64ConstOp struct { } func (p *selEQInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6508,12 +6259,6 @@ type selEQInt64Float64Op struct { } func (p *selEQInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6528,7 +6273,7 @@ func (p *selEQInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -6710,12 +6455,6 @@ type selEQInt64DecimalConstOp struct { } func (p *selEQInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6741,9 +6480,9 @@ func (p *selEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -6770,9 +6509,9 @@ func (p *selEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -6795,9 +6534,9 @@ func (p *selEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -6821,9 +6560,9 @@ func (p *selEQInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -6848,12 +6587,6 @@ type selEQInt64DecimalOp struct { } func (p *selEQInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -6868,7 +6601,7 @@ func (p *selEQInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -6883,9 +6616,9 @@ func (p *selEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -6915,9 +6648,9 @@ func (p *selEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -6941,9 +6674,9 @@ func (p *selEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -6970,9 +6703,9 @@ func (p *selEQInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -6998,12 +6731,6 @@ type selEQFloat64Int16ConstOp struct { } func (p *selEQFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7188,12 +6915,6 @@ type selEQFloat64Int16Op struct { } func (p *selEQFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7208,7 +6929,7 @@ func (p *selEQFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -7390,12 +7111,6 @@ type selEQFloat64Int32ConstOp struct { } func (p *selEQFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7580,12 +7295,6 @@ type selEQFloat64Int32Op struct { } func (p *selEQFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7600,7 +7309,7 @@ func (p *selEQFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -7782,12 +7491,6 @@ type selEQFloat64Int64ConstOp struct { } func (p *selEQFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7972,12 +7675,6 @@ type selEQFloat64Int64Op struct { } func (p *selEQFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -7992,7 +7689,7 @@ func (p *selEQFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -8174,12 +7871,6 @@ type selEQFloat64Float64ConstOp struct { } func (p *selEQFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -8364,12 +8055,6 @@ type selEQFloat64Float64Op struct { } func (p *selEQFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -8384,7 +8069,7 @@ func (p *selEQFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -8566,12 +8251,6 @@ type selEQFloat64DecimalConstOp struct { } func (p *selEQFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -8597,11 +8276,11 @@ func (p *selEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -8628,11 +8307,11 @@ func (p *selEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -8655,11 +8334,11 @@ func (p *selEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -8683,11 +8362,11 @@ func (p *selEQFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult == 0 @@ -8712,12 +8391,6 @@ type selEQFloat64DecimalOp struct { } func (p *selEQFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -8732,7 +8405,7 @@ func (p *selEQFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -8747,11 +8420,11 @@ func (p *selEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -8781,11 +8454,11 @@ func (p *selEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -8809,11 +8482,11 @@ func (p *selEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -8840,11 +8513,11 @@ func (p *selEQFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult == 0 @@ -8870,12 +8543,6 @@ type selEQTimestampTimestampConstOp struct { } func (p *selEQTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9012,12 +8679,6 @@ type selEQTimestampTimestampOp struct { } func (p *selEQTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9032,7 +8693,7 @@ func (p *selEQTimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -9166,12 +8827,6 @@ type selEQIntervalIntervalConstOp struct { } func (p *selEQIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9280,12 +8935,6 @@ type selEQIntervalIntervalOp struct { } func (p *selEQIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9300,7 +8949,7 @@ func (p *selEQIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -9406,12 +9055,6 @@ type selEQJSONJSONConstOp struct { } func (p *selEQJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9542,12 +9185,6 @@ type selEQJSONJSONOp struct { } func (p *selEQJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9562,7 +9199,7 @@ func (p *selEQJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -9688,12 +9325,6 @@ type selEQDatumDatumConstOp struct { } func (p *selEQDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9808,12 +9439,6 @@ type selEQDatumDatumOp struct { } func (p *selEQDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -9828,7 +9453,7 @@ func (p *selEQDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -9938,12 +9563,6 @@ type selNEBoolBoolConstOp struct { } func (p *selNEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10084,12 +9703,6 @@ type selNEBoolBoolOp struct { } func (p *selNEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10104,7 +9717,7 @@ func (p *selNEBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -10242,12 +9855,6 @@ type selNEBytesBytesConstOp struct { } func (p *selNEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10354,12 +9961,6 @@ type selNEBytesBytesOp struct { } func (p *selNEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10374,7 +9975,7 @@ func (p *selNEBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -10476,12 +10077,6 @@ type selNEDecimalInt16ConstOp struct { } func (p *selNEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10507,9 +10102,9 @@ func (p *selNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10536,9 +10131,9 @@ func (p *selNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10561,9 +10156,9 @@ func (p *selNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10587,9 +10182,9 @@ func (p *selNEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10614,12 +10209,6 @@ type selNEDecimalInt16Op struct { } func (p *selNEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10634,7 +10223,7 @@ func (p *selNEDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -10649,9 +10238,9 @@ func (p *selNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10681,9 +10270,9 @@ func (p *selNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10707,9 +10296,9 @@ func (p *selNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10736,9 +10325,9 @@ func (p *selNEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10764,12 +10353,6 @@ type selNEDecimalInt32ConstOp struct { } func (p *selNEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10795,9 +10378,9 @@ func (p *selNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10824,9 +10407,9 @@ func (p *selNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10849,9 +10432,9 @@ func (p *selNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10875,9 +10458,9 @@ func (p *selNEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -10902,12 +10485,6 @@ type selNEDecimalInt32Op struct { } func (p *selNEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -10922,7 +10499,7 @@ func (p *selNEDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -10937,9 +10514,9 @@ func (p *selNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10969,9 +10546,9 @@ func (p *selNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -10995,9 +10572,9 @@ func (p *selNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11024,9 +10601,9 @@ func (p *selNEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11052,12 +10629,6 @@ type selNEDecimalInt64ConstOp struct { } func (p *selNEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11083,9 +10654,9 @@ func (p *selNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11112,9 +10683,9 @@ func (p *selNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11137,9 +10708,9 @@ func (p *selNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11163,9 +10734,9 @@ func (p *selNEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11190,12 +10761,6 @@ type selNEDecimalInt64Op struct { } func (p *selNEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11210,7 +10775,7 @@ func (p *selNEDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -11225,9 +10790,9 @@ func (p *selNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11257,9 +10822,9 @@ func (p *selNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11283,9 +10848,9 @@ func (p *selNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11312,9 +10877,9 @@ func (p *selNEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11340,12 +10905,6 @@ type selNEDecimalFloat64ConstOp struct { } func (p *selNEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11371,11 +10930,11 @@ func (p *selNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11402,11 +10961,11 @@ func (p *selNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11429,11 +10988,11 @@ func (p *selNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11457,11 +11016,11 @@ func (p *selNEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult != 0 @@ -11486,12 +11045,6 @@ type selNEDecimalFloat64Op struct { } func (p *selNEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11506,7 +11059,7 @@ func (p *selNEDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -11521,11 +11074,11 @@ func (p *selNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11555,11 +11108,11 @@ func (p *selNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11583,11 +11136,11 @@ func (p *selNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11614,11 +11167,11 @@ func (p *selNEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult != 0 @@ -11644,12 +11197,6 @@ type selNEDecimalDecimalConstOp struct { } func (p *selNEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11758,12 +11305,6 @@ type selNEDecimalDecimalOp struct { } func (p *selNEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -11778,7 +11319,7 @@ func (p *selNEDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -11884,12 +11425,6 @@ type selNEInt16Int16ConstOp struct { } func (p *selNEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12042,12 +11577,6 @@ type selNEInt16Int16Op struct { } func (p *selNEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12062,7 +11591,7 @@ func (p *selNEInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -12212,12 +11741,6 @@ type selNEInt16Int32ConstOp struct { } func (p *selNEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12370,12 +11893,6 @@ type selNEInt16Int32Op struct { } func (p *selNEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12390,7 +11907,7 @@ func (p *selNEInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -12540,12 +12057,6 @@ type selNEInt16Int64ConstOp struct { } func (p *selNEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12698,12 +12209,6 @@ type selNEInt16Int64Op struct { } func (p *selNEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -12718,7 +12223,7 @@ func (p *selNEInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -12868,12 +12373,6 @@ type selNEInt16Float64ConstOp struct { } func (p *selNEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13058,12 +12557,6 @@ type selNEInt16Float64Op struct { } func (p *selNEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13078,7 +12571,7 @@ func (p *selNEInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -13260,12 +12753,6 @@ type selNEInt16DecimalConstOp struct { } func (p *selNEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13291,9 +12778,9 @@ func (p *selNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -13320,9 +12807,9 @@ func (p *selNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -13345,9 +12832,9 @@ func (p *selNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -13371,9 +12858,9 @@ func (p *selNEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -13398,12 +12885,6 @@ type selNEInt16DecimalOp struct { } func (p *selNEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13418,7 +12899,7 @@ func (p *selNEInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -13433,9 +12914,9 @@ func (p *selNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -13465,9 +12946,9 @@ func (p *selNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -13491,9 +12972,9 @@ func (p *selNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -13520,9 +13001,9 @@ func (p *selNEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -13548,12 +13029,6 @@ type selNEInt32Int16ConstOp struct { } func (p *selNEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13706,12 +13181,6 @@ type selNEInt32Int16Op struct { } func (p *selNEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -13726,7 +13195,7 @@ func (p *selNEInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -13876,12 +13345,6 @@ type selNEInt32Int32ConstOp struct { } func (p *selNEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14034,12 +13497,6 @@ type selNEInt32Int32Op struct { } func (p *selNEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14054,7 +13511,7 @@ func (p *selNEInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -14204,12 +13661,6 @@ type selNEInt32Int64ConstOp struct { } func (p *selNEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14362,12 +13813,6 @@ type selNEInt32Int64Op struct { } func (p *selNEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14382,7 +13827,7 @@ func (p *selNEInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -14532,12 +13977,6 @@ type selNEInt32Float64ConstOp struct { } func (p *selNEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14722,12 +14161,6 @@ type selNEInt32Float64Op struct { } func (p *selNEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14742,7 +14175,7 @@ func (p *selNEInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -14924,12 +14357,6 @@ type selNEInt32DecimalConstOp struct { } func (p *selNEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -14955,9 +14382,9 @@ func (p *selNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -14984,9 +14411,9 @@ func (p *selNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -15009,9 +14436,9 @@ func (p *selNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -15035,9 +14462,9 @@ func (p *selNEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -15062,12 +14489,6 @@ type selNEInt32DecimalOp struct { } func (p *selNEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -15082,7 +14503,7 @@ func (p *selNEInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -15097,9 +14518,9 @@ func (p *selNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -15129,9 +14550,9 @@ func (p *selNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -15155,9 +14576,9 @@ func (p *selNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -15184,9 +14605,9 @@ func (p *selNEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -15212,12 +14633,6 @@ type selNEInt64Int16ConstOp struct { } func (p *selNEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -15370,12 +14785,6 @@ type selNEInt64Int16Op struct { } func (p *selNEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -15390,7 +14799,7 @@ func (p *selNEInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -15540,12 +14949,6 @@ type selNEInt64Int32ConstOp struct { } func (p *selNEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -15698,12 +15101,6 @@ type selNEInt64Int32Op struct { } func (p *selNEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -15718,7 +15115,7 @@ func (p *selNEInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -15868,12 +15265,6 @@ type selNEInt64Int64ConstOp struct { } func (p *selNEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16026,12 +15417,6 @@ type selNEInt64Int64Op struct { } func (p *selNEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16046,7 +15431,7 @@ func (p *selNEInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -16196,12 +15581,6 @@ type selNEInt64Float64ConstOp struct { } func (p *selNEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16386,12 +15765,6 @@ type selNEInt64Float64Op struct { } func (p *selNEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16406,7 +15779,7 @@ func (p *selNEInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -16588,12 +15961,6 @@ type selNEInt64DecimalConstOp struct { } func (p *selNEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16619,9 +15986,9 @@ func (p *selNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -16648,9 +16015,9 @@ func (p *selNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -16673,9 +16040,9 @@ func (p *selNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -16699,9 +16066,9 @@ func (p *selNEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -16726,12 +16093,6 @@ type selNEInt64DecimalOp struct { } func (p *selNEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -16746,7 +16107,7 @@ func (p *selNEInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -16761,9 +16122,9 @@ func (p *selNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -16793,9 +16154,9 @@ func (p *selNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -16819,9 +16180,9 @@ func (p *selNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -16848,9 +16209,9 @@ func (p *selNEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -16876,12 +16237,6 @@ type selNEFloat64Int16ConstOp struct { } func (p *selNEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17066,12 +16421,6 @@ type selNEFloat64Int16Op struct { } func (p *selNEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17086,7 +16435,7 @@ func (p *selNEFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -17268,12 +16617,6 @@ type selNEFloat64Int32ConstOp struct { } func (p *selNEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17458,12 +16801,6 @@ type selNEFloat64Int32Op struct { } func (p *selNEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17478,7 +16815,7 @@ func (p *selNEFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -17660,12 +16997,6 @@ type selNEFloat64Int64ConstOp struct { } func (p *selNEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17850,12 +17181,6 @@ type selNEFloat64Int64Op struct { } func (p *selNEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -17870,7 +17195,7 @@ func (p *selNEFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -18052,12 +17377,6 @@ type selNEFloat64Float64ConstOp struct { } func (p *selNEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18242,12 +17561,6 @@ type selNEFloat64Float64Op struct { } func (p *selNEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18262,7 +17575,7 @@ func (p *selNEFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -18444,12 +17757,6 @@ type selNEFloat64DecimalConstOp struct { } func (p *selNEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18475,11 +17782,11 @@ func (p *selNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -18506,11 +17813,11 @@ func (p *selNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -18533,11 +17840,11 @@ func (p *selNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -18561,11 +17868,11 @@ func (p *selNEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult != 0 @@ -18590,12 +17897,6 @@ type selNEFloat64DecimalOp struct { } func (p *selNEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18610,7 +17911,7 @@ func (p *selNEFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -18625,11 +17926,11 @@ func (p *selNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -18659,11 +17960,11 @@ func (p *selNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -18687,11 +17988,11 @@ func (p *selNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -18718,11 +18019,11 @@ func (p *selNEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult != 0 @@ -18748,12 +18049,6 @@ type selNETimestampTimestampConstOp struct { } func (p *selNETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18890,12 +18185,6 @@ type selNETimestampTimestampOp struct { } func (p *selNETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -18910,7 +18199,7 @@ func (p *selNETimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -19044,12 +18333,6 @@ type selNEIntervalIntervalConstOp struct { } func (p *selNEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19158,12 +18441,6 @@ type selNEIntervalIntervalOp struct { } func (p *selNEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19178,7 +18455,7 @@ func (p *selNEIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -19284,12 +18561,6 @@ type selNEJSONJSONConstOp struct { } func (p *selNEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19420,12 +18691,6 @@ type selNEJSONJSONOp struct { } func (p *selNEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19440,7 +18705,7 @@ func (p *selNEJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -19566,12 +18831,6 @@ type selNEDatumDatumConstOp struct { } func (p *selNEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19686,12 +18945,6 @@ type selNEDatumDatumOp struct { } func (p *selNEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19706,7 +18959,7 @@ func (p *selNEDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -19816,12 +19069,6 @@ type selLTBoolBoolConstOp struct { } func (p *selLTBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19962,12 +19209,6 @@ type selLTBoolBoolOp struct { } func (p *selLTBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -19982,7 +19223,7 @@ func (p *selLTBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -20120,12 +19361,6 @@ type selLTBytesBytesConstOp struct { } func (p *selLTBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20232,12 +19467,6 @@ type selLTBytesBytesOp struct { } func (p *selLTBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20252,7 +19481,7 @@ func (p *selLTBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -20354,12 +19583,6 @@ type selLTDecimalInt16ConstOp struct { } func (p *selLTDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20385,9 +19608,9 @@ func (p *selLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20414,9 +19637,9 @@ func (p *selLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20439,9 +19662,9 @@ func (p *selLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20465,9 +19688,9 @@ func (p *selLTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20492,12 +19715,6 @@ type selLTDecimalInt16Op struct { } func (p *selLTDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20512,7 +19729,7 @@ func (p *selLTDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -20527,9 +19744,9 @@ func (p *selLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20559,9 +19776,9 @@ func (p *selLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20585,9 +19802,9 @@ func (p *selLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20614,9 +19831,9 @@ func (p *selLTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20642,12 +19859,6 @@ type selLTDecimalInt32ConstOp struct { } func (p *selLTDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20673,9 +19884,9 @@ func (p *selLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20702,9 +19913,9 @@ func (p *selLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20727,9 +19938,9 @@ func (p *selLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20753,9 +19964,9 @@ func (p *selLTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20780,12 +19991,6 @@ type selLTDecimalInt32Op struct { } func (p *selLTDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20800,7 +20005,7 @@ func (p *selLTDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -20815,9 +20020,9 @@ func (p *selLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20847,9 +20052,9 @@ func (p *selLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20873,9 +20078,9 @@ func (p *selLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20902,9 +20107,9 @@ func (p *selLTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -20930,12 +20135,6 @@ type selLTDecimalInt64ConstOp struct { } func (p *selLTDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -20961,9 +20160,9 @@ func (p *selLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -20990,9 +20189,9 @@ func (p *selLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21015,9 +20214,9 @@ func (p *selLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21041,9 +20240,9 @@ func (p *selLTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21068,12 +20267,6 @@ type selLTDecimalInt64Op struct { } func (p *selLTDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21088,7 +20281,7 @@ func (p *selLTDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -21103,9 +20296,9 @@ func (p *selLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21135,9 +20328,9 @@ func (p *selLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21161,9 +20354,9 @@ func (p *selLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21190,9 +20383,9 @@ func (p *selLTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21218,12 +20411,6 @@ type selLTDecimalFloat64ConstOp struct { } func (p *selLTDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21249,11 +20436,11 @@ func (p *selLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21280,11 +20467,11 @@ func (p *selLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21307,11 +20494,11 @@ func (p *selLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21335,11 +20522,11 @@ func (p *selLTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult < 0 @@ -21364,12 +20551,6 @@ type selLTDecimalFloat64Op struct { } func (p *selLTDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21384,7 +20565,7 @@ func (p *selLTDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -21399,11 +20580,11 @@ func (p *selLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21433,11 +20614,11 @@ func (p *selLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21461,11 +20642,11 @@ func (p *selLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21492,11 +20673,11 @@ func (p *selLTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult < 0 @@ -21522,12 +20703,6 @@ type selLTDecimalDecimalConstOp struct { } func (p *selLTDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21636,12 +20811,6 @@ type selLTDecimalDecimalOp struct { } func (p *selLTDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21656,7 +20825,7 @@ func (p *selLTDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -21762,12 +20931,6 @@ type selLTInt16Int16ConstOp struct { } func (p *selLTInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21920,12 +21083,6 @@ type selLTInt16Int16Op struct { } func (p *selLTInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -21940,7 +21097,7 @@ func (p *selLTInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -22090,12 +21247,6 @@ type selLTInt16Int32ConstOp struct { } func (p *selLTInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22248,12 +21399,6 @@ type selLTInt16Int32Op struct { } func (p *selLTInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22268,7 +21413,7 @@ func (p *selLTInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -22418,12 +21563,6 @@ type selLTInt16Int64ConstOp struct { } func (p *selLTInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22576,12 +21715,6 @@ type selLTInt16Int64Op struct { } func (p *selLTInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22596,7 +21729,7 @@ func (p *selLTInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -22746,12 +21879,6 @@ type selLTInt16Float64ConstOp struct { } func (p *selLTInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22936,12 +22063,6 @@ type selLTInt16Float64Op struct { } func (p *selLTInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -22956,7 +22077,7 @@ func (p *selLTInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -23138,12 +22259,6 @@ type selLTInt16DecimalConstOp struct { } func (p *selLTInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23169,9 +22284,9 @@ func (p *selLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -23198,9 +22313,9 @@ func (p *selLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -23223,9 +22338,9 @@ func (p *selLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -23249,9 +22364,9 @@ func (p *selLTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -23276,12 +22391,6 @@ type selLTInt16DecimalOp struct { } func (p *selLTInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23296,7 +22405,7 @@ func (p *selLTInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -23311,9 +22420,9 @@ func (p *selLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -23343,9 +22452,9 @@ func (p *selLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -23369,9 +22478,9 @@ func (p *selLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -23398,9 +22507,9 @@ func (p *selLTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -23426,12 +22535,6 @@ type selLTInt32Int16ConstOp struct { } func (p *selLTInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23584,12 +22687,6 @@ type selLTInt32Int16Op struct { } func (p *selLTInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23604,7 +22701,7 @@ func (p *selLTInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -23754,12 +22851,6 @@ type selLTInt32Int32ConstOp struct { } func (p *selLTInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23912,12 +23003,6 @@ type selLTInt32Int32Op struct { } func (p *selLTInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -23932,7 +23017,7 @@ func (p *selLTInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -24082,12 +23167,6 @@ type selLTInt32Int64ConstOp struct { } func (p *selLTInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24240,12 +23319,6 @@ type selLTInt32Int64Op struct { } func (p *selLTInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24260,7 +23333,7 @@ func (p *selLTInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -24410,12 +23483,6 @@ type selLTInt32Float64ConstOp struct { } func (p *selLTInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24600,12 +23667,6 @@ type selLTInt32Float64Op struct { } func (p *selLTInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24620,7 +23681,7 @@ func (p *selLTInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -24802,12 +23863,6 @@ type selLTInt32DecimalConstOp struct { } func (p *selLTInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24833,9 +23888,9 @@ func (p *selLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -24862,9 +23917,9 @@ func (p *selLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -24887,9 +23942,9 @@ func (p *selLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -24913,9 +23968,9 @@ func (p *selLTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -24940,12 +23995,6 @@ type selLTInt32DecimalOp struct { } func (p *selLTInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -24960,7 +24009,7 @@ func (p *selLTInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -24975,9 +24024,9 @@ func (p *selLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -25007,9 +24056,9 @@ func (p *selLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -25033,9 +24082,9 @@ func (p *selLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -25062,9 +24111,9 @@ func (p *selLTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -25090,12 +24139,6 @@ type selLTInt64Int16ConstOp struct { } func (p *selLTInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25248,12 +24291,6 @@ type selLTInt64Int16Op struct { } func (p *selLTInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25268,7 +24305,7 @@ func (p *selLTInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -25418,12 +24455,6 @@ type selLTInt64Int32ConstOp struct { } func (p *selLTInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25576,12 +24607,6 @@ type selLTInt64Int32Op struct { } func (p *selLTInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25596,7 +24621,7 @@ func (p *selLTInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -25746,12 +24771,6 @@ type selLTInt64Int64ConstOp struct { } func (p *selLTInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25904,12 +24923,6 @@ type selLTInt64Int64Op struct { } func (p *selLTInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -25924,7 +24937,7 @@ func (p *selLTInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -26074,12 +25087,6 @@ type selLTInt64Float64ConstOp struct { } func (p *selLTInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26264,12 +25271,6 @@ type selLTInt64Float64Op struct { } func (p *selLTInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26284,7 +25285,7 @@ func (p *selLTInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -26466,12 +25467,6 @@ type selLTInt64DecimalConstOp struct { } func (p *selLTInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26497,9 +25492,9 @@ func (p *selLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -26526,9 +25521,9 @@ func (p *selLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -26551,9 +25546,9 @@ func (p *selLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -26577,9 +25572,9 @@ func (p *selLTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -26604,12 +25599,6 @@ type selLTInt64DecimalOp struct { } func (p *selLTInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26624,7 +25613,7 @@ func (p *selLTInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -26639,9 +25628,9 @@ func (p *selLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -26671,9 +25660,9 @@ func (p *selLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -26697,9 +25686,9 @@ func (p *selLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -26726,9 +25715,9 @@ func (p *selLTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -26754,12 +25743,6 @@ type selLTFloat64Int16ConstOp struct { } func (p *selLTFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26944,12 +25927,6 @@ type selLTFloat64Int16Op struct { } func (p *selLTFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -26964,7 +25941,7 @@ func (p *selLTFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -27146,12 +26123,6 @@ type selLTFloat64Int32ConstOp struct { } func (p *selLTFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -27336,12 +26307,6 @@ type selLTFloat64Int32Op struct { } func (p *selLTFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -27356,7 +26321,7 @@ func (p *selLTFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -27538,12 +26503,6 @@ type selLTFloat64Int64ConstOp struct { } func (p *selLTFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -27728,12 +26687,6 @@ type selLTFloat64Int64Op struct { } func (p *selLTFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -27748,7 +26701,7 @@ func (p *selLTFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -27930,12 +26883,6 @@ type selLTFloat64Float64ConstOp struct { } func (p *selLTFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28120,12 +27067,6 @@ type selLTFloat64Float64Op struct { } func (p *selLTFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28140,7 +27081,7 @@ func (p *selLTFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -28322,12 +27263,6 @@ type selLTFloat64DecimalConstOp struct { } func (p *selLTFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28353,11 +27288,11 @@ func (p *selLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -28384,11 +27319,11 @@ func (p *selLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -28411,11 +27346,11 @@ func (p *selLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -28439,11 +27374,11 @@ func (p *selLTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult < 0 @@ -28468,12 +27403,6 @@ type selLTFloat64DecimalOp struct { } func (p *selLTFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28488,7 +27417,7 @@ func (p *selLTFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -28503,11 +27432,11 @@ func (p *selLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -28537,11 +27466,11 @@ func (p *selLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -28565,11 +27494,11 @@ func (p *selLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -28596,11 +27525,11 @@ func (p *selLTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult < 0 @@ -28626,12 +27555,6 @@ type selLTTimestampTimestampConstOp struct { } func (p *selLTTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28768,12 +27691,6 @@ type selLTTimestampTimestampOp struct { } func (p *selLTTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -28788,7 +27705,7 @@ func (p *selLTTimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -28922,12 +27839,6 @@ type selLTIntervalIntervalConstOp struct { } func (p *selLTIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29036,12 +27947,6 @@ type selLTIntervalIntervalOp struct { } func (p *selLTIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29056,7 +27961,7 @@ func (p *selLTIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -29162,12 +28067,6 @@ type selLTJSONJSONConstOp struct { } func (p *selLTJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29298,12 +28197,6 @@ type selLTJSONJSONOp struct { } func (p *selLTJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29318,7 +28211,7 @@ func (p *selLTJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -29444,12 +28337,6 @@ type selLTDatumDatumConstOp struct { } func (p *selLTDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29564,12 +28451,6 @@ type selLTDatumDatumOp struct { } func (p *selLTDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29584,7 +28465,7 @@ func (p *selLTDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -29694,12 +28575,6 @@ type selLEBoolBoolConstOp struct { } func (p *selLEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29840,12 +28715,6 @@ type selLEBoolBoolOp struct { } func (p *selLEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -29860,7 +28729,7 @@ func (p *selLEBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -29998,12 +28867,6 @@ type selLEBytesBytesConstOp struct { } func (p *selLEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30110,12 +28973,6 @@ type selLEBytesBytesOp struct { } func (p *selLEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30130,7 +28987,7 @@ func (p *selLEBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -30232,12 +29089,6 @@ type selLEDecimalInt16ConstOp struct { } func (p *selLEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30263,9 +29114,9 @@ func (p *selLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30292,9 +29143,9 @@ func (p *selLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30317,9 +29168,9 @@ func (p *selLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30343,9 +29194,9 @@ func (p *selLEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30370,12 +29221,6 @@ type selLEDecimalInt16Op struct { } func (p *selLEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30390,7 +29235,7 @@ func (p *selLEDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -30405,9 +29250,9 @@ func (p *selLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30437,9 +29282,9 @@ func (p *selLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30463,9 +29308,9 @@ func (p *selLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30492,9 +29337,9 @@ func (p *selLEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30520,12 +29365,6 @@ type selLEDecimalInt32ConstOp struct { } func (p *selLEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30551,9 +29390,9 @@ func (p *selLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30580,9 +29419,9 @@ func (p *selLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30605,9 +29444,9 @@ func (p *selLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30631,9 +29470,9 @@ func (p *selLEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30658,12 +29497,6 @@ type selLEDecimalInt32Op struct { } func (p *selLEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30678,7 +29511,7 @@ func (p *selLEDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -30693,9 +29526,9 @@ func (p *selLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30725,9 +29558,9 @@ func (p *selLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30751,9 +29584,9 @@ func (p *selLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30780,9 +29613,9 @@ func (p *selLEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -30808,12 +29641,6 @@ type selLEDecimalInt64ConstOp struct { } func (p *selLEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30839,9 +29666,9 @@ func (p *selLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30868,9 +29695,9 @@ func (p *selLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30893,9 +29720,9 @@ func (p *selLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30919,9 +29746,9 @@ func (p *selLEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -30946,12 +29773,6 @@ type selLEDecimalInt64Op struct { } func (p *selLEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -30966,7 +29787,7 @@ func (p *selLEDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -30981,9 +29802,9 @@ func (p *selLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31013,9 +29834,9 @@ func (p *selLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31039,9 +29860,9 @@ func (p *selLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31068,9 +29889,9 @@ func (p *selLEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31096,12 +29917,6 @@ type selLEDecimalFloat64ConstOp struct { } func (p *selLEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31127,11 +29942,11 @@ func (p *selLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -31158,11 +29973,11 @@ func (p *selLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -31185,11 +30000,11 @@ func (p *selLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -31213,11 +30028,11 @@ func (p *selLEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult <= 0 @@ -31242,12 +30057,6 @@ type selLEDecimalFloat64Op struct { } func (p *selLEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31262,7 +30071,7 @@ func (p *selLEDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -31277,11 +30086,11 @@ func (p *selLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31311,11 +30120,11 @@ func (p *selLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31339,11 +30148,11 @@ func (p *selLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31370,11 +30179,11 @@ func (p *selLEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult <= 0 @@ -31400,12 +30209,6 @@ type selLEDecimalDecimalConstOp struct { } func (p *selLEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31514,12 +30317,6 @@ type selLEDecimalDecimalOp struct { } func (p *selLEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31534,7 +30331,7 @@ func (p *selLEDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -31640,12 +30437,6 @@ type selLEInt16Int16ConstOp struct { } func (p *selLEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31798,12 +30589,6 @@ type selLEInt16Int16Op struct { } func (p *selLEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -31818,7 +30603,7 @@ func (p *selLEInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -31968,12 +30753,6 @@ type selLEInt16Int32ConstOp struct { } func (p *selLEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32126,12 +30905,6 @@ type selLEInt16Int32Op struct { } func (p *selLEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32146,7 +30919,7 @@ func (p *selLEInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -32296,12 +31069,6 @@ type selLEInt16Int64ConstOp struct { } func (p *selLEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32454,12 +31221,6 @@ type selLEInt16Int64Op struct { } func (p *selLEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32474,7 +31235,7 @@ func (p *selLEInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -32624,12 +31385,6 @@ type selLEInt16Float64ConstOp struct { } func (p *selLEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32814,12 +31569,6 @@ type selLEInt16Float64Op struct { } func (p *selLEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -32834,7 +31583,7 @@ func (p *selLEInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -33016,12 +31765,6 @@ type selLEInt16DecimalConstOp struct { } func (p *selLEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33047,9 +31790,9 @@ func (p *selLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -33076,9 +31819,9 @@ func (p *selLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -33101,9 +31844,9 @@ func (p *selLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -33127,9 +31870,9 @@ func (p *selLEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -33154,12 +31897,6 @@ type selLEInt16DecimalOp struct { } func (p *selLEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33174,7 +31911,7 @@ func (p *selLEInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -33189,9 +31926,9 @@ func (p *selLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -33221,9 +31958,9 @@ func (p *selLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -33247,9 +31984,9 @@ func (p *selLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -33276,9 +32013,9 @@ func (p *selLEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -33304,12 +32041,6 @@ type selLEInt32Int16ConstOp struct { } func (p *selLEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33462,12 +32193,6 @@ type selLEInt32Int16Op struct { } func (p *selLEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33482,7 +32207,7 @@ func (p *selLEInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -33632,12 +32357,6 @@ type selLEInt32Int32ConstOp struct { } func (p *selLEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33790,12 +32509,6 @@ type selLEInt32Int32Op struct { } func (p *selLEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -33810,7 +32523,7 @@ func (p *selLEInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -33960,12 +32673,6 @@ type selLEInt32Int64ConstOp struct { } func (p *selLEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34118,12 +32825,6 @@ type selLEInt32Int64Op struct { } func (p *selLEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34138,7 +32839,7 @@ func (p *selLEInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -34288,12 +32989,6 @@ type selLEInt32Float64ConstOp struct { } func (p *selLEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34478,12 +33173,6 @@ type selLEInt32Float64Op struct { } func (p *selLEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34498,7 +33187,7 @@ func (p *selLEInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -34680,12 +33369,6 @@ type selLEInt32DecimalConstOp struct { } func (p *selLEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34711,9 +33394,9 @@ func (p *selLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -34740,9 +33423,9 @@ func (p *selLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -34765,9 +33448,9 @@ func (p *selLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -34791,9 +33474,9 @@ func (p *selLEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -34818,12 +33501,6 @@ type selLEInt32DecimalOp struct { } func (p *selLEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -34838,7 +33515,7 @@ func (p *selLEInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -34853,9 +33530,9 @@ func (p *selLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -34885,9 +33562,9 @@ func (p *selLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -34911,9 +33588,9 @@ func (p *selLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -34940,9 +33617,9 @@ func (p *selLEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -34968,12 +33645,6 @@ type selLEInt64Int16ConstOp struct { } func (p *selLEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35126,12 +33797,6 @@ type selLEInt64Int16Op struct { } func (p *selLEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35146,7 +33811,7 @@ func (p *selLEInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -35296,12 +33961,6 @@ type selLEInt64Int32ConstOp struct { } func (p *selLEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35454,12 +34113,6 @@ type selLEInt64Int32Op struct { } func (p *selLEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35474,7 +34127,7 @@ func (p *selLEInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -35624,12 +34277,6 @@ type selLEInt64Int64ConstOp struct { } func (p *selLEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35782,12 +34429,6 @@ type selLEInt64Int64Op struct { } func (p *selLEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -35802,7 +34443,7 @@ func (p *selLEInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -35952,12 +34593,6 @@ type selLEInt64Float64ConstOp struct { } func (p *selLEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36142,12 +34777,6 @@ type selLEInt64Float64Op struct { } func (p *selLEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36162,7 +34791,7 @@ func (p *selLEInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -36344,12 +34973,6 @@ type selLEInt64DecimalConstOp struct { } func (p *selLEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36375,9 +34998,9 @@ func (p *selLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -36404,9 +35027,9 @@ func (p *selLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -36429,9 +35052,9 @@ func (p *selLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -36455,9 +35078,9 @@ func (p *selLEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -36482,12 +35105,6 @@ type selLEInt64DecimalOp struct { } func (p *selLEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36502,7 +35119,7 @@ func (p *selLEInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -36517,9 +35134,9 @@ func (p *selLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -36549,9 +35166,9 @@ func (p *selLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -36575,9 +35192,9 @@ func (p *selLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -36604,9 +35221,9 @@ func (p *selLEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -36632,12 +35249,6 @@ type selLEFloat64Int16ConstOp struct { } func (p *selLEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36822,12 +35433,6 @@ type selLEFloat64Int16Op struct { } func (p *selLEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -36842,7 +35447,7 @@ func (p *selLEFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -37024,12 +35629,6 @@ type selLEFloat64Int32ConstOp struct { } func (p *selLEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -37214,12 +35813,6 @@ type selLEFloat64Int32Op struct { } func (p *selLEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -37234,7 +35827,7 @@ func (p *selLEFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -37416,12 +36009,6 @@ type selLEFloat64Int64ConstOp struct { } func (p *selLEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -37606,12 +36193,6 @@ type selLEFloat64Int64Op struct { } func (p *selLEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -37626,7 +36207,7 @@ func (p *selLEFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -37808,12 +36389,6 @@ type selLEFloat64Float64ConstOp struct { } func (p *selLEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -37998,12 +36573,6 @@ type selLEFloat64Float64Op struct { } func (p *selLEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38018,7 +36587,7 @@ func (p *selLEFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -38200,12 +36769,6 @@ type selLEFloat64DecimalConstOp struct { } func (p *selLEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38231,11 +36794,11 @@ func (p *selLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -38262,11 +36825,11 @@ func (p *selLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -38289,11 +36852,11 @@ func (p *selLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -38317,11 +36880,11 @@ func (p *selLEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult <= 0 @@ -38346,12 +36909,6 @@ type selLEFloat64DecimalOp struct { } func (p *selLEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38366,7 +36923,7 @@ func (p *selLEFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -38381,11 +36938,11 @@ func (p *selLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -38415,11 +36972,11 @@ func (p *selLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -38443,11 +37000,11 @@ func (p *selLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -38474,11 +37031,11 @@ func (p *selLEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult <= 0 @@ -38504,12 +37061,6 @@ type selLETimestampTimestampConstOp struct { } func (p *selLETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38646,12 +37197,6 @@ type selLETimestampTimestampOp struct { } func (p *selLETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38666,7 +37211,7 @@ func (p *selLETimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -38800,12 +37345,6 @@ type selLEIntervalIntervalConstOp struct { } func (p *selLEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38914,12 +37453,6 @@ type selLEIntervalIntervalOp struct { } func (p *selLEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -38934,7 +37467,7 @@ func (p *selLEIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -39040,12 +37573,6 @@ type selLEJSONJSONConstOp struct { } func (p *selLEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39176,12 +37703,6 @@ type selLEJSONJSONOp struct { } func (p *selLEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39196,7 +37717,7 @@ func (p *selLEJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -39322,12 +37843,6 @@ type selLEDatumDatumConstOp struct { } func (p *selLEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39442,12 +37957,6 @@ type selLEDatumDatumOp struct { } func (p *selLEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39462,7 +37971,7 @@ func (p *selLEDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -39572,12 +38081,6 @@ type selGTBoolBoolConstOp struct { } func (p *selGTBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39718,12 +38221,6 @@ type selGTBoolBoolOp struct { } func (p *selGTBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39738,7 +38235,7 @@ func (p *selGTBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -39876,12 +38373,6 @@ type selGTBytesBytesConstOp struct { } func (p *selGTBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -39988,12 +38479,6 @@ type selGTBytesBytesOp struct { } func (p *selGTBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40008,7 +38493,7 @@ func (p *selGTBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -40110,12 +38595,6 @@ type selGTDecimalInt16ConstOp struct { } func (p *selGTDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40141,9 +38620,9 @@ func (p *selGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40170,9 +38649,9 @@ func (p *selGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40195,9 +38674,9 @@ func (p *selGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40221,9 +38700,9 @@ func (p *selGTDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40248,12 +38727,6 @@ type selGTDecimalInt16Op struct { } func (p *selGTDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40268,7 +38741,7 @@ func (p *selGTDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -40283,9 +38756,9 @@ func (p *selGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40315,9 +38788,9 @@ func (p *selGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40341,9 +38814,9 @@ func (p *selGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40370,9 +38843,9 @@ func (p *selGTDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40398,12 +38871,6 @@ type selGTDecimalInt32ConstOp struct { } func (p *selGTDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40429,9 +38896,9 @@ func (p *selGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40458,9 +38925,9 @@ func (p *selGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40483,9 +38950,9 @@ func (p *selGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40509,9 +38976,9 @@ func (p *selGTDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40536,12 +39003,6 @@ type selGTDecimalInt32Op struct { } func (p *selGTDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40556,7 +39017,7 @@ func (p *selGTDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -40571,9 +39032,9 @@ func (p *selGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40603,9 +39064,9 @@ func (p *selGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40629,9 +39090,9 @@ func (p *selGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40658,9 +39119,9 @@ func (p *selGTDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40686,12 +39147,6 @@ type selGTDecimalInt64ConstOp struct { } func (p *selGTDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40717,9 +39172,9 @@ func (p *selGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40746,9 +39201,9 @@ func (p *selGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40771,9 +39226,9 @@ func (p *selGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40797,9 +39252,9 @@ func (p *selGTDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -40824,12 +39279,6 @@ type selGTDecimalInt64Op struct { } func (p *selGTDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -40844,7 +39293,7 @@ func (p *selGTDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -40859,9 +39308,9 @@ func (p *selGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40891,9 +39340,9 @@ func (p *selGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40917,9 +39366,9 @@ func (p *selGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40946,9 +39395,9 @@ func (p *selGTDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -40974,12 +39423,6 @@ type selGTDecimalFloat64ConstOp struct { } func (p *selGTDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41005,11 +39448,11 @@ func (p *selGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -41036,11 +39479,11 @@ func (p *selGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -41063,11 +39506,11 @@ func (p *selGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -41091,11 +39534,11 @@ func (p *selGTDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult > 0 @@ -41120,12 +39563,6 @@ type selGTDecimalFloat64Op struct { } func (p *selGTDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41140,7 +39577,7 @@ func (p *selGTDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -41155,11 +39592,11 @@ func (p *selGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -41189,11 +39626,11 @@ func (p *selGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -41217,11 +39654,11 @@ func (p *selGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -41248,11 +39685,11 @@ func (p *selGTDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult > 0 @@ -41278,12 +39715,6 @@ type selGTDecimalDecimalConstOp struct { } func (p *selGTDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41392,12 +39823,6 @@ type selGTDecimalDecimalOp struct { } func (p *selGTDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41412,7 +39837,7 @@ func (p *selGTDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -41518,12 +39943,6 @@ type selGTInt16Int16ConstOp struct { } func (p *selGTInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41676,12 +40095,6 @@ type selGTInt16Int16Op struct { } func (p *selGTInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -41696,7 +40109,7 @@ func (p *selGTInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -41846,12 +40259,6 @@ type selGTInt16Int32ConstOp struct { } func (p *selGTInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42004,12 +40411,6 @@ type selGTInt16Int32Op struct { } func (p *selGTInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42024,7 +40425,7 @@ func (p *selGTInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -42174,12 +40575,6 @@ type selGTInt16Int64ConstOp struct { } func (p *selGTInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42332,12 +40727,6 @@ type selGTInt16Int64Op struct { } func (p *selGTInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42352,7 +40741,7 @@ func (p *selGTInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -42502,12 +40891,6 @@ type selGTInt16Float64ConstOp struct { } func (p *selGTInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42692,12 +41075,6 @@ type selGTInt16Float64Op struct { } func (p *selGTInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42712,7 +41089,7 @@ func (p *selGTInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -42894,12 +41271,6 @@ type selGTInt16DecimalConstOp struct { } func (p *selGTInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -42925,9 +41296,9 @@ func (p *selGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -42954,9 +41325,9 @@ func (p *selGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -42979,9 +41350,9 @@ func (p *selGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -43005,9 +41376,9 @@ func (p *selGTInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -43032,12 +41403,6 @@ type selGTInt16DecimalOp struct { } func (p *selGTInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43052,7 +41417,7 @@ func (p *selGTInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -43067,9 +41432,9 @@ func (p *selGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -43099,9 +41464,9 @@ func (p *selGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -43125,9 +41490,9 @@ func (p *selGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -43154,9 +41519,9 @@ func (p *selGTInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -43182,12 +41547,6 @@ type selGTInt32Int16ConstOp struct { } func (p *selGTInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43340,12 +41699,6 @@ type selGTInt32Int16Op struct { } func (p *selGTInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43360,7 +41713,7 @@ func (p *selGTInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -43510,12 +41863,6 @@ type selGTInt32Int32ConstOp struct { } func (p *selGTInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43668,12 +42015,6 @@ type selGTInt32Int32Op struct { } func (p *selGTInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43688,7 +42029,7 @@ func (p *selGTInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -43838,12 +42179,6 @@ type selGTInt32Int64ConstOp struct { } func (p *selGTInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -43996,12 +42331,6 @@ type selGTInt32Int64Op struct { } func (p *selGTInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -44016,7 +42345,7 @@ func (p *selGTInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -44166,12 +42495,6 @@ type selGTInt32Float64ConstOp struct { } func (p *selGTInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -44356,12 +42679,6 @@ type selGTInt32Float64Op struct { } func (p *selGTInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -44376,7 +42693,7 @@ func (p *selGTInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -44558,12 +42875,6 @@ type selGTInt32DecimalConstOp struct { } func (p *selGTInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -44589,9 +42900,9 @@ func (p *selGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -44618,9 +42929,9 @@ func (p *selGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -44643,9 +42954,9 @@ func (p *selGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -44669,9 +42980,9 @@ func (p *selGTInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -44696,12 +43007,6 @@ type selGTInt32DecimalOp struct { } func (p *selGTInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -44716,7 +43021,7 @@ func (p *selGTInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -44731,9 +43036,9 @@ func (p *selGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -44763,9 +43068,9 @@ func (p *selGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -44789,9 +43094,9 @@ func (p *selGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -44818,9 +43123,9 @@ func (p *selGTInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -44846,12 +43151,6 @@ type selGTInt64Int16ConstOp struct { } func (p *selGTInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45004,12 +43303,6 @@ type selGTInt64Int16Op struct { } func (p *selGTInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45024,7 +43317,7 @@ func (p *selGTInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -45174,12 +43467,6 @@ type selGTInt64Int32ConstOp struct { } func (p *selGTInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45332,12 +43619,6 @@ type selGTInt64Int32Op struct { } func (p *selGTInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45352,7 +43633,7 @@ func (p *selGTInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -45502,12 +43783,6 @@ type selGTInt64Int64ConstOp struct { } func (p *selGTInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45660,12 +43935,6 @@ type selGTInt64Int64Op struct { } func (p *selGTInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -45680,7 +43949,7 @@ func (p *selGTInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -45830,12 +44099,6 @@ type selGTInt64Float64ConstOp struct { } func (p *selGTInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46020,12 +44283,6 @@ type selGTInt64Float64Op struct { } func (p *selGTInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46040,7 +44297,7 @@ func (p *selGTInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -46222,12 +44479,6 @@ type selGTInt64DecimalConstOp struct { } func (p *selGTInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46253,9 +44504,9 @@ func (p *selGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -46282,9 +44533,9 @@ func (p *selGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -46307,9 +44558,9 @@ func (p *selGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -46333,9 +44584,9 @@ func (p *selGTInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -46360,12 +44611,6 @@ type selGTInt64DecimalOp struct { } func (p *selGTInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46380,7 +44625,7 @@ func (p *selGTInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -46395,9 +44640,9 @@ func (p *selGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -46427,9 +44672,9 @@ func (p *selGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -46453,9 +44698,9 @@ func (p *selGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -46482,9 +44727,9 @@ func (p *selGTInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -46510,12 +44755,6 @@ type selGTFloat64Int16ConstOp struct { } func (p *selGTFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46700,12 +44939,6 @@ type selGTFloat64Int16Op struct { } func (p *selGTFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -46720,7 +44953,7 @@ func (p *selGTFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -46902,12 +45135,6 @@ type selGTFloat64Int32ConstOp struct { } func (p *selGTFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47092,12 +45319,6 @@ type selGTFloat64Int32Op struct { } func (p *selGTFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47112,7 +45333,7 @@ func (p *selGTFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -47294,12 +45515,6 @@ type selGTFloat64Int64ConstOp struct { } func (p *selGTFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47484,12 +45699,6 @@ type selGTFloat64Int64Op struct { } func (p *selGTFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47504,7 +45713,7 @@ func (p *selGTFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -47686,12 +45895,6 @@ type selGTFloat64Float64ConstOp struct { } func (p *selGTFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47876,12 +46079,6 @@ type selGTFloat64Float64Op struct { } func (p *selGTFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -47896,7 +46093,7 @@ func (p *selGTFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -48078,12 +46275,6 @@ type selGTFloat64DecimalConstOp struct { } func (p *selGTFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48109,11 +46300,11 @@ func (p *selGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -48140,11 +46331,11 @@ func (p *selGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -48167,11 +46358,11 @@ func (p *selGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -48195,11 +46386,11 @@ func (p *selGTFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult > 0 @@ -48224,12 +46415,6 @@ type selGTFloat64DecimalOp struct { } func (p *selGTFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48244,7 +46429,7 @@ func (p *selGTFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -48259,11 +46444,11 @@ func (p *selGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -48293,11 +46478,11 @@ func (p *selGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -48321,11 +46506,11 @@ func (p *selGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -48352,11 +46537,11 @@ func (p *selGTFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult > 0 @@ -48382,12 +46567,6 @@ type selGTTimestampTimestampConstOp struct { } func (p *selGTTimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48524,12 +46703,6 @@ type selGTTimestampTimestampOp struct { } func (p *selGTTimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
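(Illustrative aside, not part of the patch: the Float64/Decimal hunks above follow the same stack-local scratch pattern, except that converting the float operand can fail, and the generated code reports that failure through `colexecerror.ExpectedError`. A sketch under the same assumptions as the earlier one, with a hypothetical function name and assumed import paths:)

```
package colexecselexample

import (
	"github.com/cockroachdb/apd/v3" // import path assumed
	"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// geFloat64Decimal mirrors the generated GE comparison: the scratch decimal
// is stack-local, and a conversion error from SetFloat64 is surfaced as an
// expected (query-level) error rather than an internal one.
func geFloat64Decimal(arg float64, constArg *apd.Decimal) bool {
	var tmpDec apd.Decimal //gcassert:noescape
	if _, err := tmpDec.SetFloat64(arg); err != nil {
		colexecerror.ExpectedError(err)
	}
	return tree.CompareDecimals(&tmpDec, constArg) >= 0
}
```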
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48544,7 +46717,7 @@ func (p *selGTTimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -48678,12 +46851,6 @@ type selGTIntervalIntervalConstOp struct { } func (p *selGTIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48792,12 +46959,6 @@ type selGTIntervalIntervalOp struct { } func (p *selGTIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -48812,7 +46973,7 @@ func (p *selGTIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -48918,12 +47079,6 @@ type selGTJSONJSONConstOp struct { } func (p *selGTJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49054,12 +47209,6 @@ type selGTJSONJSONOp struct { } func (p *selGTJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49074,7 +47223,7 @@ func (p *selGTJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -49200,12 +47349,6 @@ type selGTDatumDatumConstOp struct { } func (p *selGTDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49320,12 +47463,6 @@ type selGTDatumDatumOp struct { } func (p *selGTDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49340,7 +47477,7 @@ func (p *selGTDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -49450,12 +47587,6 @@ type selGEBoolBoolConstOp struct { } func (p *selGEBoolBoolConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49596,12 +47727,6 @@ type selGEBoolBoolOp struct { } func (p *selGEBoolBoolOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49616,7 +47741,7 @@ func (p *selGEBoolBoolOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -49754,12 +47879,6 @@ type selGEBytesBytesConstOp struct { } func (p *selGEBytesBytesConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49866,12 +47985,6 @@ type selGEBytesBytesOp struct { } func (p *selGEBytesBytesOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -49886,7 +47999,7 @@ func (p *selGEBytesBytesOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -49988,12 +48101,6 @@ type selGEDecimalInt16ConstOp struct { } func (p *selGEDecimalInt16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50019,9 +48126,9 @@ func (p *selGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50048,9 +48155,9 @@ func (p *selGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50073,9 +48180,9 @@ func (p *selGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50099,9 +48206,9 @@ func (p *selGEDecimalInt16ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50126,12 +48233,6 @@ type selGEDecimalInt16Op struct { } func (p *selGEDecimalInt16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50146,7 +48247,7 @@ func (p *selGEDecimalInt16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -50161,9 +48262,9 @@ func (p *selGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50193,9 +48294,9 @@ func (p *selGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50219,9 +48320,9 @@ func (p *selGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50248,9 +48349,9 @@ func (p *selGEDecimalInt16Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50276,12 +48377,6 @@ type selGEDecimalInt32ConstOp struct { } func (p *selGEDecimalInt32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50307,9 +48402,9 @@ func (p *selGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50336,9 +48431,9 @@ func (p *selGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50361,9 +48456,9 @@ func (p *selGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50387,9 +48482,9 @@ func (p *selGEDecimalInt32ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50414,12 +48509,6 @@ type selGEDecimalInt32Op struct { } func (p *selGEDecimalInt32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50434,7 +48523,7 @@ func (p *selGEDecimalInt32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -50449,9 +48538,9 @@ func (p *selGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50481,9 +48570,9 @@ func (p *selGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50507,9 +48596,9 @@ func (p *selGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50536,9 +48625,9 @@ func (p *selGEDecimalInt32Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50564,12 +48653,6 @@ type selGEDecimalInt64ConstOp struct { } func (p *selGEDecimalInt64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50595,9 +48678,9 @@ func (p *selGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50624,9 +48707,9 @@ func (p *selGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50649,9 +48732,9 @@ func (p *selGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50675,9 +48758,9 @@ func (p *selGEDecimalInt64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(p.constArg)) - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50702,12 +48785,6 @@ type selGEDecimalInt64Op struct { } func (p *selGEDecimalInt64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50722,7 +48799,7 @@ func (p *selGEDecimalInt64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -50737,9 +48814,9 @@ func (p *selGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50769,9 +48846,9 @@ func (p *selGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50795,9 +48872,9 @@ func (p *selGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50824,9 +48901,9 @@ func (p *selGEDecimalInt64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg2)) - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -50852,12 +48929,6 @@ type selGEDecimalFloat64ConstOp struct { } func (p *selGEDecimalFloat64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -50883,11 +48954,11 @@ func (p *selGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50914,11 +48985,11 @@ func (p *selGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50941,11 +49012,11 @@ func (p *selGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50969,11 +49040,11 @@ func (p *selGEDecimalFloat64ConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(p.constArg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg, tmpDec) + cmpResult = tree.CompareDecimals(&arg, &tmpDec) } cmp = cmpResult >= 0 @@ -50998,12 +49069,6 @@ type selGEDecimalFloat64Op struct { } func (p *selGEDecimalFloat64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51018,7 +49083,7 @@ func (p *selGEDecimalFloat64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -51033,11 +49098,11 @@ func (p *selGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -51067,11 +49132,11 @@ func (p *selGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -51095,11 +49160,11 @@ func (p *selGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -51126,11 +49191,11 @@ func (p *selGEDecimalFloat64Op) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg2)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(&arg1, tmpDec) + cmpResult = tree.CompareDecimals(&arg1, &tmpDec) } cmp = cmpResult >= 0 @@ -51156,12 +49221,6 @@ type selGEDecimalDecimalConstOp struct { } func (p *selGEDecimalDecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51270,12 +49329,6 @@ type selGEDecimalDecimalOp struct { } func (p *selGEDecimalDecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51290,7 +49343,7 @@ func (p *selGEDecimalDecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -51396,12 +49449,6 @@ type selGEInt16Int16ConstOp struct { } func (p *selGEInt16Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51554,12 +49601,6 @@ type selGEInt16Int16Op struct { } func (p *selGEInt16Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51574,7 +49615,7 @@ func (p *selGEInt16Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -51724,12 +49765,6 @@ type selGEInt16Int32ConstOp struct { } func (p *selGEInt16Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51882,12 +49917,6 @@ type selGEInt16Int32Op struct { } func (p *selGEInt16Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -51902,7 +49931,7 @@ func (p *selGEInt16Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -52052,12 +50081,6 @@ type selGEInt16Int64ConstOp struct { } func (p *selGEInt16Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52210,12 +50233,6 @@ type selGEInt16Int64Op struct { } func (p *selGEInt16Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52230,7 +50247,7 @@ func (p *selGEInt16Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -52380,12 +50397,6 @@ type selGEInt16Float64ConstOp struct { } func (p *selGEInt16Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52570,12 +50581,6 @@ type selGEInt16Float64Op struct { } func (p *selGEInt16Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52590,7 +50595,7 @@ func (p *selGEInt16Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -52772,12 +50777,6 @@ type selGEInt16DecimalConstOp struct { } func (p *selGEInt16DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52803,9 +50802,9 @@ func (p *selGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -52832,9 +50831,9 @@ func (p *selGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -52857,9 +50856,9 @@ func (p *selGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -52883,9 +50882,9 @@ func (p *selGEInt16DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -52910,12 +50909,6 @@ type selGEInt16DecimalOp struct { } func (p *selGEInt16DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -52930,7 +50923,7 @@ func (p *selGEInt16DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -52945,9 +50938,9 @@ func (p *selGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -52977,9 +50970,9 @@ func (p *selGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -53003,9 +50996,9 @@ func (p *selGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -53032,9 +51025,9 @@ func (p *selGEInt16DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -53060,12 +51053,6 @@ type selGEInt32Int16ConstOp struct { } func (p *selGEInt32Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53218,12 +51205,6 @@ type selGEInt32Int16Op struct { } func (p *selGEInt32Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53238,7 +51219,7 @@ func (p *selGEInt32Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -53388,12 +51369,6 @@ type selGEInt32Int32ConstOp struct { } func (p *selGEInt32Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53546,12 +51521,6 @@ type selGEInt32Int32Op struct { } func (p *selGEInt32Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53566,7 +51535,7 @@ func (p *selGEInt32Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -53716,12 +51685,6 @@ type selGEInt32Int64ConstOp struct { } func (p *selGEInt32Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53874,12 +51837,6 @@ type selGEInt32Int64Op struct { } func (p *selGEInt32Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -53894,7 +51851,7 @@ func (p *selGEInt32Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -54044,12 +52001,6 @@ type selGEInt32Float64ConstOp struct { } func (p *selGEInt32Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54234,12 +52185,6 @@ type selGEInt32Float64Op struct { } func (p *selGEInt32Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54254,7 +52199,7 @@ func (p *selGEInt32Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -54436,12 +52381,6 @@ type selGEInt32DecimalConstOp struct { } func (p *selGEInt32DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54467,9 +52406,9 @@ func (p *selGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -54496,9 +52435,9 @@ func (p *selGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -54521,9 +52460,9 @@ func (p *selGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -54547,9 +52486,9 @@ func (p *selGEInt32DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -54574,12 +52513,6 @@ type selGEInt32DecimalOp struct { } func (p *selGEInt32DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54594,7 +52527,7 @@ func (p *selGEInt32DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -54609,9 +52542,9 @@ func (p *selGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -54641,9 +52574,9 @@ func (p *selGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -54667,9 +52600,9 @@ func (p *selGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -54696,9 +52629,9 @@ func (p *selGEInt32DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -54724,12 +52657,6 @@ type selGEInt64Int16ConstOp struct { } func (p *selGEInt64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54882,12 +52809,6 @@ type selGEInt64Int16Op struct { } func (p *selGEInt64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -54902,7 +52823,7 @@ func (p *selGEInt64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -55052,12 +52973,6 @@ type selGEInt64Int32ConstOp struct { } func (p *selGEInt64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55210,12 +53125,6 @@ type selGEInt64Int32Op struct { } func (p *selGEInt64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55230,7 +53139,7 @@ func (p *selGEInt64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -55380,12 +53289,6 @@ type selGEInt64Int64ConstOp struct { } func (p *selGEInt64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55538,12 +53441,6 @@ type selGEInt64Int64Op struct { } func (p *selGEInt64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55558,7 +53455,7 @@ func (p *selGEInt64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -55708,12 +53605,6 @@ type selGEInt64Float64ConstOp struct { } func (p *selGEInt64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55898,12 +53789,6 @@ type selGEInt64Float64Op struct { } func (p *selGEInt64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -55918,7 +53803,7 @@ func (p *selGEInt64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -56100,12 +53985,6 @@ type selGEInt64DecimalConstOp struct { } func (p *selGEInt64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56131,9 +54010,9 @@ func (p *selGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -56160,9 +54039,9 @@ func (p *selGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -56185,9 +54064,9 @@ func (p *selGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -56211,9 +54090,9 @@ func (p *selGEInt64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg)) - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -56238,12 +54117,6 @@ type selGEInt64DecimalOp struct { } func (p *selGEInt64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56258,7 +54131,7 @@ func (p *selGEInt64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -56273,9 +54146,9 @@ func (p *selGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -56305,9 +54178,9 @@ func (p *selGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -56331,9 +54204,9 @@ func (p *selGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -56360,9 +54233,9 @@ func (p *selGEInt64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64(arg1)) - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -56388,12 +54261,6 @@ type selGEFloat64Int16ConstOp struct { } func (p *selGEFloat64Int16ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56578,12 +54445,6 @@ type selGEFloat64Int16Op struct { } func (p *selGEFloat64Int16Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56598,7 +54459,7 @@ func (p *selGEFloat64Int16Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -56780,12 +54641,6 @@ type selGEFloat64Int32ConstOp struct { } func (p *selGEFloat64Int32ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56970,12 +54825,6 @@ type selGEFloat64Int32Op struct { } func (p *selGEFloat64Int32Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -56990,7 +54839,7 @@ func (p *selGEFloat64Int32Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -57172,12 +55021,6 @@ type selGEFloat64Int64ConstOp struct { } func (p *selGEFloat64Int64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -57362,12 +55205,6 @@ type selGEFloat64Int64Op struct { } func (p *selGEFloat64Int64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -57382,7 +55219,7 @@ func (p *selGEFloat64Int64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -57564,12 +55401,6 @@ type selGEFloat64Float64ConstOp struct { } func (p *selGEFloat64Float64ConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -57754,12 +55585,6 @@ type selGEFloat64Float64Op struct { } func (p *selGEFloat64Float64Op) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -57774,7 +55599,7 @@ func (p *selGEFloat64Float64Op) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -57956,12 +55781,6 @@ type selGEFloat64DecimalConstOp struct { } func (p *selGEFloat64DecimalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -57987,11 +55806,11 @@ func (p *selGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -58018,11 +55837,11 @@ func (p *selGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -58045,11 +55864,11 @@ func (p *selGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -58073,11 +55892,11 @@ func (p *selGEFloat64DecimalConstOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &p.constArg) + cmpResult = tree.CompareDecimals(&tmpDec, &p.constArg) } cmp = cmpResult >= 0 @@ -58102,12 +55921,6 @@ type selGEFloat64DecimalOp struct { } func (p *selGEFloat64DecimalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58122,7 +55935,7 @@ func (p *selGEFloat64DecimalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -58137,11 +55950,11 @@ func (p *selGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -58171,11 +55984,11 @@ func (p *selGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -58199,11 +56012,11 @@ func (p *selGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -58230,11 +56043,11 @@ func (p *selGEFloat64DecimalOp) Next() coldata.Batch { var cmpResult int { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64(arg1)); err != nil { colexecerror.ExpectedError(err) } - cmpResult = tree.CompareDecimals(tmpDec, &arg2) + cmpResult = tree.CompareDecimals(&tmpDec, &arg2) } cmp = cmpResult >= 0 @@ -58260,12 +56073,6 @@ type selGETimestampTimestampConstOp struct { } func (p *selGETimestampTimestampConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58402,12 +56209,6 @@ type selGETimestampTimestampOp struct { } func (p *selGETimestampTimestampOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58422,7 +56223,7 @@ func (p *selGETimestampTimestampOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -58556,12 +56357,6 @@ type selGEIntervalIntervalConstOp struct { } func (p *selGEIntervalIntervalConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58670,12 +56465,6 @@ type selGEIntervalIntervalOp struct { } func (p *selGEIntervalIntervalOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58690,7 +56479,7 @@ func (p *selGEIntervalIntervalOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -58796,12 +56585,6 @@ type selGEJSONJSONConstOp struct { } func (p *selGEJSONJSONConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58932,12 +56715,6 @@ type selGEJSONJSONOp struct { } func (p *selGEJSONJSONOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -58952,7 +56729,7 @@ func (p *selGEJSONJSONOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { @@ -59078,12 +56855,6 @@ type selGEDatumDatumConstOp struct { } func (p *selGEDatumDatumConstOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -59198,12 +56969,6 @@ type selGEDatumDatumOp struct { } func (p *selGEDatumDatumOp) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -59218,7 +56983,7 @@ func (p *selGEDatumDatumOp) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) if sel := batch.Selection(); sel != nil { sel = sel[:n] for _, i := range sel { diff --git a/pkg/sql/colexec/colexecsel/selection_ops_tmpl.go b/pkg/sql/colexec/colexecsel/selection_ops_tmpl.go index 0e8e5d5839d7..c0a4c4922c17 100644 --- a/pkg/sql/colexec/colexecsel/selection_ops_tmpl.go +++ b/pkg/sql/colexec/colexecsel/selection_ops_tmpl.go @@ -22,13 +22,12 @@ package colexecsel import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colconv" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexeccmp" - "github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -176,16 +175,14 @@ func _SEL_LOOP(_HAS_NULLS bool) { // */}} // constant, except for the constant itself. type selConstOpBase struct { colexecop.OneInputHelper - colIdx int - overloadHelper execgen.OverloadHelper + colIdx int } // selOpBase contains all of the fields for non-constant binary selections. type selOpBase struct { colexecop.OneInputHelper - col1Idx int - col2Idx int - overloadHelper execgen.OverloadHelper + col1Idx int + col2Idx int } // {{define "selConstOp"}} @@ -195,12 +192,6 @@ type _OP_CONST_NAME struct { } func (p *_OP_CONST_NAME) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. - _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -232,12 +223,6 @@ type _OP_NAME struct { } func (p *_OP_NAME) Next() coldata.Batch { - // In order to inline the templated code of overloads, we need to have a - // `_overloadHelper` local variable of type `execgen.OverloadHelper`. - _overloadHelper := p.overloadHelper - // However, the scratch is not used in all of the selection operators, so - // we add this to go around "unused" error. 
- _ = _overloadHelper for { batch := p.Input.Next() if batch.Length() == 0 { @@ -252,7 +237,7 @@ func (p *_OP_NAME) Next() coldata.Batch { var idx int if vec1.MaybeHasNulls() || vec2.MaybeHasNulls() { - nulls := vec1.Nulls().Or(vec2.Nulls()) + nulls := vec1.Nulls().Or(*vec2.Nulls()) _SEL_LOOP(true) } else { _SEL_LOOP(false) diff --git a/pkg/sql/colexec/colexecspan/BUILD.bazel b/pkg/sql/colexec/colexecspan/BUILD.bazel index a3ea026fe7e9..75b4549f290d 100644 --- a/pkg/sql/colexec/colexecspan/BUILD.bazel +++ b/pkg/sql/colexec/colexecspan/BUILD.bazel @@ -20,12 +20,13 @@ go_library( "//pkg/sql/colmem", # keep "//pkg/sql/execinfra", # keep "//pkg/sql/rowenc", # keep + "//pkg/sql/rowenc/keyside", # keep "//pkg/sql/sem/tree", # keep "//pkg/sql/types", # keep "//pkg/util", # keep "//pkg/util/duration", # keep "//pkg/util/encoding", # keep - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", # keep ], ) @@ -38,6 +39,7 @@ go_test( "span_assembler_test.go", ], embed = [":colexecspan"], # keep + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", diff --git a/pkg/sql/colexec/colexecspan/span_assembler.eg.go b/pkg/sql/colexec/colexecspan/span_assembler.eg.go index 4eaafdfc63b8..932bda356786 100644 --- a/pkg/sql/colexec/colexecspan/span_assembler.eg.go +++ b/pkg/sql/colexec/colexecspan/span_assembler.eg.go @@ -42,7 +42,7 @@ func NewColSpanAssembler( ) ColSpanAssembler { base := spanAssemblerPool.Get().(*spanAssemblerBase) base.colFamStartKeys, base.colFamEndKeys = getColFamilyEncodings(neededColOrdsInWholeTable, table, index) - keyPrefix := rowenc.MakeIndexKeyPrefix(codec, table, index.GetID()) + keyPrefix := rowenc.MakeIndexKeyPrefix(codec, table.GetID(), index.GetID()) base.scratchKey = append(base.scratchKey[:0], keyPrefix...) base.prefixLength = len(keyPrefix) base.allocator = allocator diff --git a/pkg/sql/colexec/colexecspan/span_assembler_tmpl.go b/pkg/sql/colexec/colexecspan/span_assembler_tmpl.go index bb3251e62cb2..cccdd5e0c0bc 100644 --- a/pkg/sql/colexec/colexecspan/span_assembler_tmpl.go +++ b/pkg/sql/colexec/colexecspan/span_assembler_tmpl.go @@ -54,7 +54,7 @@ func NewColSpanAssembler( ) ColSpanAssembler { base := spanAssemblerPool.Get().(*spanAssemblerBase) base.colFamStartKeys, base.colFamEndKeys = getColFamilyEncodings(neededColOrdsInWholeTable, table, index) - keyPrefix := rowenc.MakeIndexKeyPrefix(codec, table, index.GetID()) + keyPrefix := rowenc.MakeIndexKeyPrefix(codec, table.GetID(), index.GetID()) base.scratchKey = append(base.scratchKey[:0], keyPrefix...) base.prefixLength = len(keyPrefix) base.allocator = allocator diff --git a/pkg/sql/colexec/colexecspan/span_encoder.eg.go b/pkg/sql/colexec/colexecspan/span_encoder.eg.go index 0f88dc9c8279..2c40ec37adb8 100644 --- a/pkg/sql/colexec/colexecspan/span_encoder.eg.go +++ b/pkg/sql/colexec/colexecspan/span_encoder.eg.go @@ -14,7 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -24,7 +24,7 @@ import ( // Workaround for bazel auto-generated code. 
goimports does not automatically // pick up the right packages when run within the bazel sandbox. var ( - _ = rowenc.EncodeTableKey + _ = keyside.Encode _ tree.Datum ) @@ -963,7 +963,7 @@ func (op *spanEncoderDatumAsc) next(batch coldata.Batch, startIdx, endIdx int) * val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Ascending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Ascending) if err != nil { colexecerror.ExpectedError(err) } @@ -978,7 +978,7 @@ func (op *spanEncoderDatumAsc) next(batch coldata.Batch, startIdx, endIdx int) * val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Ascending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Ascending) if err != nil { colexecerror.ExpectedError(err) } @@ -1001,7 +1001,7 @@ func (op *spanEncoderDatumAsc) next(batch coldata.Batch, startIdx, endIdx int) * val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Ascending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Ascending) if err != nil { colexecerror.ExpectedError(err) } @@ -1016,7 +1016,7 @@ func (op *spanEncoderDatumAsc) next(batch coldata.Batch, startIdx, endIdx int) * val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Ascending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Ascending) if err != nil { colexecerror.ExpectedError(err) } @@ -1815,7 +1815,7 @@ func (op *spanEncoderDatumDesc) next(batch coldata.Batch, startIdx, endIdx int) val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Descending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Descending) if err != nil { colexecerror.ExpectedError(err) } @@ -1830,7 +1830,7 @@ func (op *spanEncoderDatumDesc) next(batch coldata.Batch, startIdx, endIdx int) val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Descending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Descending) if err != nil { colexecerror.ExpectedError(err) } @@ -1853,7 +1853,7 @@ func (op *spanEncoderDatumDesc) next(batch coldata.Batch, startIdx, endIdx int) val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Descending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Descending) if err != nil { colexecerror.ExpectedError(err) } @@ -1868,7 +1868,7 @@ func (op *spanEncoderDatumDesc) next(batch coldata.Batch, startIdx, endIdx int) val := col.Get(i) var err error - op.scratch, err = rowenc.EncodeTableKey(op.scratch, val.(tree.Datum), encoding.Descending) + op.scratch, err = keyside.Encode(op.scratch, val.(tree.Datum), encoding.Descending) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/colexecspan/span_encoder_tmpl.go b/pkg/sql/colexec/colexecspan/span_encoder_tmpl.go index a15af608788d..2b88ebabf110 100644 --- a/pkg/sql/colexec/colexecspan/span_encoder_tmpl.go +++ b/pkg/sql/colexec/colexecspan/span_encoder_tmpl.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colmem" - 
"github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -36,7 +36,7 @@ import ( // Workaround for bazel auto-generated code. goimports does not automatically // pick up the right packages when run within the bazel sandbox. var ( - _ = rowenc.EncodeTableKey + _ = keyside.Encode _ tree.Datum ) diff --git a/pkg/sql/colexec/colexectestutils/BUILD.bazel b/pkg/sql/colexec/colexectestutils/BUILD.bazel index cfc55bf60278..7d0bdd640134 100644 --- a/pkg/sql/colexec/colexectestutils/BUILD.bazel +++ b/pkg/sql/colexec/colexectestutils/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "//pkg/col/coldata", "//pkg/col/typeconv", "//pkg/settings/cluster", - "//pkg/sql/catalog/descpb", "//pkg/sql/colexec/colexecargs", "//pkg/sql/colexecerror", "//pkg/sql/colexecop", @@ -22,12 +21,11 @@ go_library( "//pkg/sql/execinfrapb", "//pkg/sql/parser", "//pkg/sql/randgen", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/rowexec", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util/duration", - "//pkg/util/encoding", "//pkg/util/envutil", "//pkg/util/json", "//pkg/util/log", @@ -35,7 +33,7 @@ go_library( "//pkg/util/randutil", "//pkg/util/timeofday", "//pkg/util/timeutil", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_pmezard_go_difflib//difflib", "@com_github_stretchr_testify//assert", diff --git a/pkg/sql/colexec/colexectestutils/utils.go b/pkg/sql/colexec/colexectestutils/utils.go index 665bf2e29e9b..b0db336ca2ec 100644 --- a/pkg/sql/colexec/colexectestutils/utils.go +++ b/pkg/sql/colexec/colexectestutils/utils.go @@ -22,11 +22,10 @@ import ( "testing/quick" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" @@ -34,11 +33,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/duration" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -684,8 +682,8 @@ func RunTestsWithFn( if typs != nil { inputTypes = typs[i] } - rng, _ := randutil.NewTestRand() - inputSources[i] = newOpTestSelInput(allocator, rng, batchSize, tup, inputTypes) + inputSources[i] = newOpTestSelInput(allocator, batchSize, tup, inputTypes) + inputSources[i].(*opTestInput).batchLengthRandomizationEnabled = true } } else { for i, tup := range tups { @@ -693,6 +691,7 @@ func RunTestsWithFn( inputTypes = typs[i] } inputSources[i] = NewOpTestInput(allocator, batchSize, tup, inputTypes) + 
inputSources[i].(*opTestInput).batchLengthRandomizationEnabled = true } } test(t, inputSources) @@ -837,8 +836,9 @@ type opTestInput struct { typs []*types.T - batchSize int - tuples Tuples + batchSize int + batchLengthRandomizationEnabled bool + tuples Tuples // initialTuples are tuples passed in into the constructor, and we keep the // reference to them in order to be able to reset the operator. initialTuples Tuples @@ -877,12 +877,11 @@ func NewOpTestInput( } func newOpTestSelInput( - allocator *colmem.Allocator, rng *rand.Rand, batchSize int, tuples Tuples, typs []*types.T, + allocator *colmem.Allocator, batchSize int, tuples Tuples, typs []*types.T, ) *opTestInput { ret := &opTestInput{ allocator: allocator, useSel: true, - rng: rng, batchSize: batchSize, tuples: tuples, initialTuples: tuples, @@ -900,7 +899,7 @@ func (s *opTestInput) Init(context.Context) { s.typs = extrapolateTypesFromTuples(s.tuples) } s.batch = s.allocator.NewMemBatchWithMaxCapacity(s.typs) - + s.rng, _ = randutil.NewTestRand() s.selection = make([]int, coldata.BatchSize()) for i := range s.selection { s.selection[i] = i @@ -916,6 +915,13 @@ func (s *opTestInput) Next() coldata.Batch { if len(s.tuples) < batchSize { batchSize = len(s.tuples) } + if s.batchLengthRandomizationEnabled { + // With 50% probability for this particular batch use a random length in + // range [1, batchSize]. + if s.rng.Float64() < 0.5 { + batchSize = s.rng.Intn(batchSize) + 1 + } + } tups := s.tuples[:batchSize] s.tuples = s.tuples[batchSize:] @@ -1655,7 +1661,7 @@ func (c *chunkingBatchSource) Init(context.Context) { c.batch = c.allocator.NewMemBatchWithMaxCapacity(c.typs) for i := range c.cols { c.batch.ColVec(i).SetCol(c.cols[i].Col()) - c.batch.ColVec(i).SetNulls(c.cols[i].Nulls()) + c.batch.ColVec(i).SetNulls(*c.cols[i].Nulls()) } } @@ -1677,8 +1683,7 @@ func (c *chunkingBatchSource) Next() coldata.Batch { // responsible for updating those, so we iterate only up to len(c.typs) // as per out initialization. c.batch.ColVec(i).SetCol(c.cols[i].Window(c.curIdx, lastIdx).Col()) - nullsSlice := c.cols[i].Nulls().Slice(c.curIdx, lastIdx) - c.batch.ColVec(i).SetNulls(&nullsSlice) + c.batch.ColVec(i).SetNulls(c.cols[i].Nulls().Slice(c.curIdx, lastIdx)) } c.batch.SetLength(lastIdx - c.curIdx) c.curIdx = lastIdx @@ -1762,8 +1767,8 @@ func MakeRandWindowFrameRangeOffset(t *testing.T, rng *rand.Rand, typ *types.T) // use in testing window functions in RANGE mode with offsets. 
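The colexecspan hunks further above carry two related changes: `rowenc.MakeIndexKeyPrefix` is now called with `table.GetID()` rather than the whole table descriptor, and the span encoder's per-datum key encoding goes through `keyside.Encode` instead of `rowenc.EncodeTableKey`. A small sketch of the new call, assuming only the call shape visible in the hunks (`keyside.Encode(buf, datum, direction) ([]byte, error)`):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/util/encoding"
)

func main() {
	// keyside.Encode appends the key encoding of a single datum to a buffer,
	// the role previously played by rowenc.EncodeTableKey in the span encoder.
	var scratch []byte
	scratch, err := keyside.Encode(scratch, tree.NewDInt(tree.DInt(42)), encoding.Ascending)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", scratch)
}
```

The colexectestutils hunks immediately above thread a `batchLengthRandomizationEnabled` flag through `opTestInput`: the RNG is now created in `Init` via `randutil.NewTestRand`, and `Next` shrinks the batch to a random length in `[1, batchSize]` with 50% probability, so operators under test also see partially filled batches. A minimal sketch of that length decision (the helper name is this note's own):

```go
package main

import (
	"fmt"
	"math/rand"
)

// maybeRandomizeBatchSize mirrors the logic added to opTestInput.Next: with
// 50% probability the batch uses a random length in [1, batchSize] instead of
// the full size, exercising operators on short batches as well.
func maybeRandomizeBatchSize(rng *rand.Rand, batchSize int) int {
	if rng.Float64() < 0.5 {
		return rng.Intn(batchSize) + 1
	}
	return batchSize
}

func main() {
	rng := rand.New(rand.NewSource(42))
	for i := 0; i < 4; i++ {
		fmt.Println(maybeRandomizeBatchSize(rng, 1024))
	}
}
```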
func EncodeWindowFrameOffset(t *testing.T, offset tree.Datum) []byte { var encoded, scratch []byte - encoded, err := rowenc.EncodeTableValue( - encoded, descpb.ColumnID(encoding.NoColumnID), offset, scratch) + encoded, err := valueside.Encode( + encoded, valueside.NoColumnID, offset, scratch) require.NoError(t, err) return encoded } diff --git a/pkg/sql/colexec/colexecutils/BUILD.bazel b/pkg/sql/colexec/colexecutils/BUILD.bazel index 2e6990b74e14..9fe4edc20e45 100644 --- a/pkg/sql/colexec/colexecutils/BUILD.bazel +++ b/pkg/sql/colexec/colexecutils/BUILD.bazel @@ -42,6 +42,7 @@ go_test( "spilling_queue_test.go", ], embed = [":colexecutils"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", diff --git a/pkg/sql/colexec/colexecwindow/BUILD.bazel b/pkg/sql/colexec/colexecwindow/BUILD.bazel index c50385e8cdb9..051aace60a27 100644 --- a/pkg/sql/colexec/colexecwindow/BUILD.bazel +++ b/pkg/sql/colexec/colexecwindow/BUILD.bazel @@ -37,7 +37,7 @@ go_library( "//pkg/util/json", # keep "//pkg/util/mon", # keep "//pkg/util/timeutil/pgdate", # keep - "@com_github_cockroachdb_apd_v2//:apd", # keep + "@com_github_cockroachdb_apd_v3//:apd", # keep "@com_github_cockroachdb_errors//:errors", # keep "@com_github_marusama_semaphore//:semaphore", # keep ], @@ -54,6 +54,7 @@ go_test( "window_functions_test.go", ], embed = [":colexecwindow"], + tags = ["no-remote"], deps = [ "//pkg/col/coldata", "//pkg/col/coldataext", @@ -81,7 +82,7 @@ go_test( "//pkg/util/log", "//pkg/util/mon", "//pkg/util/randutil", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_marusama_semaphore//:semaphore", "@com_github_stretchr_testify//require", diff --git a/pkg/sql/colexec/colexecwindow/min_max_removable_agg.eg.go b/pkg/sql/colexec/colexecwindow/min_max_removable_agg.eg.go index e2415fb674b2..59a9a155e4f1 100644 --- a/pkg/sql/colexec/colexecwindow/min_max_removable_agg.eg.go +++ b/pkg/sql/colexec/colexecwindow/min_max_removable_agg.eg.go @@ -15,7 +15,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecwindow/min_max_removable_agg_tmpl.go b/pkg/sql/colexec/colexecwindow/min_max_removable_agg_tmpl.go index 4f466289b141..36dd5e2442d9 100644 --- a/pkg/sql/colexec/colexecwindow/min_max_removable_agg_tmpl.go +++ b/pkg/sql/colexec/colexecwindow/min_max_removable_agg_tmpl.go @@ -24,7 +24,7 @@ package colexecwindow import ( "context" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go b/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go index 746c05b10a83..c2be2770cf7a 100644 --- a/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go +++ b/pkg/sql/colexec/colexecwindow/range_offset_handler.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" 
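`EncodeWindowFrameOffset` above now calls `valueside.Encode` with `valueside.NoColumnID` in place of `rowenc.EncodeTableValue` with `descpb.ColumnID(encoding.NoColumnID)`. A short sketch, assuming only the signature shown in the hunk (`valueside.Encode(appendTo, colID, datum, scratch) ([]byte, error)`):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

func main() {
	// Value-side encoding of a single datum with no column ID prefix, as used
	// for window frame offsets in the test utilities.
	var encoded, scratch []byte
	encoded, err := valueside.Encode(encoded, valueside.NoColumnID, tree.NewDInt(tree.DInt(42)), scratch)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", encoded)
}
```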
"github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/duration" @@ -59,7 +58,7 @@ type rangeOffsetHandler interface { func newRangeOffsetHandler( evalCtx *tree.EvalContext, - datumAlloc *rowenc.DatumAlloc, + datumAlloc *tree.DatumAlloc, bound *execinfrapb.WindowerSpec_Frame_Bound, ordColType *types.T, ordColAsc, isStart bool, @@ -77,26 +76,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetPrecedingStartAscInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetPrecedingStartAscInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -106,9 +96,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -118,9 +105,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -130,9 +114,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -142,9 +123,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -154,9 +132,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartAscTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = 
execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -168,7 +143,7 @@ func newRangeOffsetHandler( } _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -180,26 +155,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetPrecedingStartDescInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetPrecedingStartDescInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -209,9 +175,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -221,9 +184,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -233,9 +193,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -245,9 +202,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -257,9 +211,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingStartDescTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -271,7 +222,7 @@ func 
newRangeOffsetHandler( } binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -286,26 +237,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetPrecedingEndAscInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetPrecedingEndAscInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -315,9 +257,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -327,9 +266,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -339,9 +275,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -351,9 +284,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -363,9 +293,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndAscTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -377,7 +304,7 @@ func newRangeOffsetHandler( } _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, 
EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -389,26 +316,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetPrecedingEndDescInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetPrecedingEndDescInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -418,9 +336,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -430,9 +345,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -442,9 +354,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -454,9 +363,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -466,9 +372,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetPrecedingEndDescTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -480,7 +383,7 @@ func newRangeOffsetHandler( } binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -498,26 +401,17 @@ func newRangeOffsetHandler( op 
:= &rangeHandlerOffsetFollowingStartAscInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetFollowingStartAscInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetFollowingStartAscInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -527,9 +421,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartAscDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -539,9 +430,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartAscFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -551,9 +439,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartAscInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -563,9 +448,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartAscDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -575,9 +457,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartAscTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -589,7 +468,7 @@ func newRangeOffsetHandler( } binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -601,26 +480,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - _, binOp, _ := 
tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetFollowingStartDescInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetFollowingStartDescInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -630,9 +500,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -642,9 +509,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -654,9 +518,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -666,9 +527,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -678,9 +536,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingStartDescTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -692,7 +547,7 @@ func newRangeOffsetHandler( } _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -707,26 +562,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} 
return op case 32: op := &rangeHandlerOffsetFollowingEndAscInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetFollowingEndAscInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -736,9 +582,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -748,9 +591,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -760,9 +600,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -772,9 +609,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -784,9 +618,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndAscTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -798,7 +629,7 @@ func newRangeOffsetHandler( } binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -810,26 +641,17 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescInt16{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int16), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case 32: op := &rangeHandlerOffsetFollowingEndDescInt32{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int32), } - _, binOp, _ := 
tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op case -1: default: op := &rangeHandlerOffsetFollowingEndDescInt64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(int64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DecimalFamily: @@ -839,9 +661,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescDecimal{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(apd.Decimal), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.FloatFamily: @@ -851,9 +670,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescFloat64{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(float64), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.IntervalFamily: @@ -863,9 +679,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescInterval{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.DateFamily: @@ -875,9 +688,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescDate{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimestampTZFamily, types.TimestampFamily: @@ -887,9 +697,6 @@ func newRangeOffsetHandler( op := &rangeHandlerOffsetFollowingEndDescTimestamp{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(duration.Duration), } - _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( - ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } case types.TimeTZFamily, types.TimeFamily: @@ -901,7 +708,7 @@ func newRangeOffsetHandler( } _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} return op } } @@ -915,10 +722,9 @@ func newRangeOffsetHandler( // rangeOffsetHandlerBase extracts common fields and methods of the // rangeOffsetHandler utility operators. type rangeOffsetHandlerBase struct { - storedCols *colexecutils.SpillingBuffer - ordColIdx int - peersColIdx int - overloadHelper execgen.OverloadHelper + storedCols *colexecutils.SpillingBuffer + ordColIdx int + peersColIdx int } // rangeHandlerOffsetPrecedingStartAscInt16 is a utility operator used to retrieve the location of @@ -949,11 +755,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1098,11 +899,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1247,11 +1043,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1396,11 +1187,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1534,11 +1320,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1688,11 +1469,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1819,11 +1595,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -1980,11 +1751,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2099,7 +1865,8 @@ func (h *rangeHandlerOffsetPrecedingStartAscTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetPrecedingStartAscDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDatum{} @@ -2123,11 +1890,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartAscDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartAscDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2261,11 +2028,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2383,11 +2145,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2505,11 +2262,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2627,11 +2379,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2738,11 +2485,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2865,11 +2607,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -2969,11 +2706,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3103,11 +2835,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3195,7 +2922,8 @@ func (h *rangeHandlerOffsetPrecedingStartDescTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetPrecedingStartDescDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDatum{} @@ -3219,11 +2947,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingStartDescDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingStartDescDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3330,11 +3058,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3496,11 +3219,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3662,11 +3380,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3828,11 +3541,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -3983,11 +3691,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4154,11 +3857,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4302,11 +4000,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4480,11 +4173,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4616,7 +4304,8 @@ func (h *rangeHandlerOffsetPrecedingEndAscTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetPrecedingEndAscDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDatum{} @@ -4640,11 +4329,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndAscDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndAscDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4795,11 +4484,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -4934,11 +4618,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5073,11 +4752,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5212,11 +4886,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5340,11 +5009,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5484,11 +5148,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5605,11 +5264,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5756,11 +5410,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -5865,7 +5514,8 @@ func (h *rangeHandlerOffsetPrecedingEndDescTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetPrecedingEndDescDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDatum{} @@ -5889,11 +5539,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetPrecedingEndDescDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetPrecedingEndDescDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6017,11 +5667,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6166,11 +5811,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6315,11 +5955,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6464,11 +6099,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6602,11 +6232,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6756,11 +6381,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -6887,11 +6507,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7048,11 +6663,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7167,7 +6777,8 @@ func (h *rangeHandlerOffsetFollowingStartAscTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetFollowingStartAscDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDatum{} @@ -7191,11 +6802,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartAscDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartAscDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7329,11 +6940,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7451,11 +7057,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7573,11 +7174,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7695,11 +7291,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7806,11 +7397,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -7933,11 +7519,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8037,11 +7618,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8171,11 +7747,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8263,7 +7834,8 @@ func (h *rangeHandlerOffsetFollowingStartDescTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetFollowingStartDescDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDatum{} @@ -8287,11 +7859,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingStartDescDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingStartDescDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8398,11 +7970,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8564,11 +8131,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8730,11 +8292,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -8896,11 +8453,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9051,11 +8603,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9222,11 +8769,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9370,11 +8912,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9548,11 +9085,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9684,7 +9216,8 @@ func (h *rangeHandlerOffsetFollowingEndAscTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetFollowingEndAscDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDatum{} @@ -9708,11 +9241,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndAscDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndAscDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -9863,11 +9396,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt16{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescInt16) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10002,11 +9530,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt32{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescInt32) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10141,11 +9664,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInt64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescInt64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10280,11 +9798,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDecimal{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescDecimal) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10408,11 +9921,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescFloat64{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescFloat64) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10552,11 +10060,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescInterval{} // the partition, whichever comes first. 
In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescInterval) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10673,11 +10176,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDate{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescDate) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10824,11 +10322,6 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescTimestamp{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescTimestamp) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. - _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -10933,7 +10426,8 @@ func (h *rangeHandlerOffsetFollowingEndDescTimestamp) close() { // the start or end bound for each row when in RANGE mode with an offset. type rangeHandlerOffsetFollowingEndDescDatum struct { rangeOffsetHandlerBase - offset tree.Datum + overloadHelper execgen.BinaryOverloadHelper + offset tree.Datum } var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDatum{} @@ -10957,11 +10451,11 @@ var _ rangeOffsetHandler = &rangeHandlerOffsetFollowingEndDescDatum{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *rangeHandlerOffsetFollowingEndDescDatum) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. if lastIdx >= h.storedCols.Length() { return lastIdx @@ -11069,7 +10563,7 @@ func (b *rangeOffsetHandlerBase) startPartition( // decodeOffset decodes the given encoded offset into the given type. 
func decodeOffset( - datumAlloc *rowenc.DatumAlloc, orderColType *types.T, typedOffset []byte, + datumAlloc *tree.DatumAlloc, orderColType *types.T, typedOffset []byte, ) interface{} { offsetType := getOffsetType(orderColType) datum, err := execinfra.DecodeDatum(datumAlloc, offsetType, typedOffset) diff --git a/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go b/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go index 7789071ee4e5..1d8d31ba287c 100644 --- a/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go +++ b/pkg/sql/colexec/colexecwindow/range_offset_handler_tmpl.go @@ -24,7 +24,7 @@ package colexecwindow import ( "context" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -34,7 +34,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/duration" @@ -96,7 +95,7 @@ type rangeOffsetHandler interface { func newRangeOffsetHandler( evalCtx *tree.EvalContext, - datumAlloc *rowenc.DatumAlloc, + datumAlloc *tree.DatumAlloc, bound *execinfrapb.WindowerSpec_Frame_Bound, ordColType *types.T, ordColAsc, isStart bool, @@ -119,6 +118,7 @@ func newRangeOffsetHandler( op := &_OP_STRING{ offset: decodeOffset(datumAlloc, ordColType, bound.TypedOffset).(_OFFSET_GOTYPE), } + // {{if eq .VecMethod "Datum"}} // {{if .BinOpIsPlus}} binOp, _, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) @@ -126,7 +126,8 @@ func newRangeOffsetHandler( _, binOp, _ := tree.WindowFrameRangeOps{}.LookupImpl( ordColType, getOffsetType(ordColType)) // {{end}} - op.overloadHelper = execgen.OverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + op.overloadHelper = execgen.BinaryOverloadHelper{BinFn: binOp.Fn, EvalCtx: evalCtx} + // {{end}} return op // {{end}} } @@ -145,10 +146,9 @@ func newRangeOffsetHandler( // rangeOffsetHandlerBase extracts common fields and methods of the // rangeOffsetHandler utility operators. type rangeOffsetHandlerBase struct { - storedCols *colexecutils.SpillingBuffer - ordColIdx int - peersColIdx int - overloadHelper execgen.OverloadHelper + storedCols *colexecutils.SpillingBuffer + ordColIdx int + peersColIdx int } // {{range .}} @@ -161,6 +161,9 @@ type rangeOffsetHandlerBase struct { // the start or end bound for each row when in RANGE mode with an offset. type _OP_STRING struct { rangeOffsetHandlerBase + // {{if eq .VecMethod "Datum"}} + overloadHelper execgen.BinaryOverloadHelper + // {{end}} offset _OFFSET_GOTYPE } @@ -188,11 +191,13 @@ var _ rangeOffsetHandler = &_OP_STRING{} // the partition, whichever comes first. In this case, the returned index would // be '4' to indicate that the end index is the end of the partition. func (h *_OP_STRING) getIdx(ctx context.Context, currRow, lastIdx int) (idx int) { - // In order to inline the templated code of overloads, we need to have a - // "_overloadHelper" local variable of type "overloadHelper". This is - // necessary when dealing with Datum columns. 
+ // {{if eq .VecMethod "Datum"}} + // In order to inline the templated code of the binary overloads operating + // on datums, we need to have a `_overloadHelper` local variable of type + // `execgen.BinaryOverloadHelper`. This is necessary when dealing with Time + // and TimeTZ columns since they aren't yet handled natively. _overloadHelper := h.overloadHelper - _ = _overloadHelper // Avoid unused variable warnings. + // {{end}} if lastIdx >= h.storedCols.Length() { return lastIdx @@ -360,7 +365,7 @@ func (b *rangeOffsetHandlerBase) startPartition( // decodeOffset decodes the given encoded offset into the given type. func decodeOffset( - datumAlloc *rowenc.DatumAlloc, orderColType *types.T, typedOffset []byte, + datumAlloc *tree.DatumAlloc, orderColType *types.T, typedOffset []byte, ) interface{} { offsetType := getOffsetType(orderColType) datum, err := execinfra.DecodeDatum(datumAlloc, offsetType, typedOffset) diff --git a/pkg/sql/colexec/colexecwindow/window_framer.eg.go b/pkg/sql/colexec/colexecwindow/window_framer.eg.go index b4111a700729..712c209492ec 100644 --- a/pkg/sql/colexec/colexecwindow/window_framer.eg.go +++ b/pkg/sql/colexec/colexecwindow/window_framer.eg.go @@ -15,7 +15,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" @@ -1087,7 +1086,7 @@ type windowFramerBase struct { // datumAlloc is used to decode the offsets in RANGE mode. It is initialized // lazily. - datumAlloc *rowenc.DatumAlloc + datumAlloc *tree.DatumAlloc exclusion execinfrapb.WindowerSpec_Frame_Exclusion @@ -1290,7 +1289,7 @@ func (b *windowFramerBase) handleOffsets( errors.AssertionFailedf("expected exactly one ordering column for RANGE mode with offset")) } // Only initialize the DatumAlloc field when we know we will need it. - b.datumAlloc = &rowenc.DatumAlloc{} + b.datumAlloc = &tree.DatumAlloc{} b.ordColIdx = int(ordering.Columns[0].ColIdx) ordColType := inputTypes[b.ordColIdx] ordColAsc := ordering.Columns[0].Direction == execinfrapb.Ordering_Column_ASC diff --git a/pkg/sql/colexec/colexecwindow/window_framer_tmpl.go b/pkg/sql/colexec/colexecwindow/window_framer_tmpl.go index 3db3661cf88a..fe34b599b75d 100644 --- a/pkg/sql/colexec/colexecwindow/window_framer_tmpl.go +++ b/pkg/sql/colexec/colexecwindow/window_framer_tmpl.go @@ -27,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" "github.com/cockroachdb/cockroach/pkg/sql/colexecerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" @@ -196,7 +195,7 @@ type windowFramerBase struct { // datumAlloc is used to decode the offsets in RANGE mode. It is initialized // lazily. - datumAlloc *rowenc.DatumAlloc + datumAlloc *tree.DatumAlloc exclusion execinfrapb.WindowerSpec_Frame_Exclusion @@ -399,7 +398,7 @@ func (b *windowFramerBase) handleOffsets( errors.AssertionFailedf("expected exactly one ordering column for RANGE mode with offset")) } // Only initialize the DatumAlloc field when we know we will need it. 
- b.datumAlloc = &rowenc.DatumAlloc{} + b.datumAlloc = &tree.DatumAlloc{} b.ordColIdx = int(ordering.Columns[0].ColIdx) ordColType := inputTypes[b.ordColIdx] ordColAsc := ordering.Columns[0].Direction == execinfrapb.Ordering_Column_ASC diff --git a/pkg/sql/colexec/colexecwindow/window_functions_test.go b/pkg/sql/colexec/colexecwindow/window_functions_test.go index 01740738e09e..28e8851d7ced 100644 --- a/pkg/sql/colexec/colexecwindow/window_functions_test.go +++ b/pkg/sql/colexec/colexecwindow/window_functions_test.go @@ -15,7 +15,7 @@ import ( "fmt" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg" diff --git a/pkg/sql/colexec/columnarizer.go b/pkg/sql/colexec/columnarizer.go index 55c26e42eb17..1faa9a145158 100644 --- a/pkg/sql/colexec/columnarizer.go +++ b/pkg/sql/colexec/columnarizer.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/errors" @@ -56,7 +57,7 @@ type Columnarizer struct { mode columnarizerMode allocator *colmem.Allocator input execinfra.RowSource - da rowenc.DatumAlloc + da tree.DatumAlloc buffered rowenc.EncDatumRows batch coldata.Batch diff --git a/pkg/sql/colexec/distinct_test.go b/pkg/sql/colexec/distinct_test.go index 700197b03faf..dc9e46d614b0 100644 --- a/pkg/sql/colexec/distinct_test.go +++ b/pkg/sql/colexec/distinct_test.go @@ -298,6 +298,28 @@ var distinctTestCases = []distinctTestCase{ errorOnDup: "\"duplicate\" distinct nulls", noError: true, }, + { + distinctCols: []uint32{0, 1}, + typs: []*types.T{types.Int, types.Int}, + // Tuples have been carefully constructed so that all of them have the + // same hash, only one tuple among first three is inserted into the hash + // table, and the last two tuples are distinct. This is a regression + // unit test for #74795. 
+ tuples: colexectestutils.Tuples{ + {1, 2}, + {1, 2}, + {1, 2}, + {nil, 7}, + {nil, 7}, + }, + expected: colexectestutils.Tuples{ + {1, 2}, + {nil, 7}, + {nil, 7}, + }, + isOrderedOnDistinctCols: false, + nullsAreDistinct: true, + }, } func mustParseJSON(s string) json.JSON { diff --git a/pkg/sql/colexec/execgen/BUILD.bazel b/pkg/sql/colexec/execgen/BUILD.bazel index 5c594519e4de..987042184fc9 100644 --- a/pkg/sql/colexec/execgen/BUILD.bazel +++ b/pkg/sql/colexec/execgen/BUILD.bazel @@ -5,7 +5,7 @@ go_library( srcs = [ "execgen.go", "inline.go", - "overloads_util.go", + "overloads_bin_util.go", "placeholders.go", "supported_bin_cmp_ops.go", "template.go", @@ -16,7 +16,6 @@ go_library( deps = [ "//pkg/sql/colexecerror", "//pkg/sql/sem/tree", - "@com_github_cockroachdb_apd_v2//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_dave_dst//:dst", "@com_github_dave_dst//decorator", diff --git a/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go index de199167a03c..bceb2ad1d199 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go @@ -25,7 +25,6 @@ import ( type avgTmplInfo struct { aggTmplInfoBase - NeedsHelper bool InputVecMethod string RetGoType string RetGoTypeSlice string @@ -148,14 +147,12 @@ func genAvgAgg(inputFileContents string, wr io.Writer) error { for _, inputTypeFamily := range []types.Family{types.IntFamily, types.DecimalFamily, types.FloatFamily, types.IntervalFamily} { tmplInfo := avgAggTypeTmplInfo{TypeFamily: toString(inputTypeFamily)} for _, inputTypeWidth := range supportedWidthsByCanonicalTypeFamily[inputTypeFamily] { - needsHelper := false // Note that we don't use execinfrapb.GetAggregateInfo because we don't // want to bring in a dependency on that package to reduce the burden // of regenerating execgen code when the protobufs get generated. retTypeFamily, retTypeWidth := inputTypeFamily, inputTypeWidth if inputTypeFamily == types.IntFamily { // Average of integers is a decimal. - needsHelper = true retTypeFamily, retTypeWidth = types.DecimalFamily, anyWidth } tmplInfo.WidthOverloads = append(tmplInfo.WidthOverloads, avgAggWidthTmplInfo{ @@ -164,7 +161,6 @@ func genAvgAgg(inputFileContents string, wr io.Writer) error { aggTmplInfoBase: aggTmplInfoBase{ canonicalTypeFamily: typeconv.TypeFamilyToCanonicalTypeFamily(retTypeFamily), }, - NeedsHelper: needsHelper, InputVecMethod: toVecMethod(inputTypeFamily, inputTypeWidth), RetGoType: toPhysicalRepresentation(retTypeFamily, retTypeWidth), RetGoTypeSlice: goTypeSliceName(retTypeFamily, retTypeWidth), diff --git a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go index 4a6395f09290..4e75dfc052b5 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/cast_gen_util.go @@ -128,8 +128,8 @@ func getDecimalToIntCastFunc(toIntWidth int32) castFunc { // as well. 
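// [Editor's aside, not part of the patch] The hunks that follow drop the shared
// scratch decimals (the old _overloadHelper.TmpDec1/TmpDec2) in favor of
// stack-allocated locals annotated with //gcassert:noescape, which asks the
// gcassert linter to verify that the local does not escape to the heap. A
// minimal sketch of the assumed pattern (roundToInt is a hypothetical helper;
// imports mirror the surrounding files):
func roundToInt(d *apd.Decimal) apd.Decimal {
	var tmpDec apd.Decimal //gcassert:noescape
	if _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, d); err != nil {
		colexecerror.ExpectedError(err)
	}
	return tmpDec
}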
convStr := ` { - tmpDec := &_overloadHelper.TmpDec1 - _, err := tree.DecimalCtx.RoundToIntegralValue(tmpDec, &%[2]s) + var tmpDec apd.Decimal //gcassert:noescape + _, err := tree.DecimalCtx.RoundToIntegralValue(&tmpDec, &%[2]s) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go index 935181d8d872..d7f570d1c1ed 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go @@ -41,7 +41,7 @@ package %s import ( "context" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go index e84e2532965c..1ac9f7196489 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go @@ -29,7 +29,7 @@ func genHashUtils(inputFileContents string, wr io.Writer) error { s := r.Replace(inputFileContents) assignHash := makeFunctionRegex("_ASSIGN_HASH", 4) - s = assignHash.ReplaceAllString(s, makeTemplateFunctionCall("Global.UnaryAssign", 4)) + s = assignHash.ReplaceAllString(s, makeTemplateFunctionCall("Global.AssignHash", 4)) rehash := makeFunctionRegex("_REHASH_BODY", 7) s = rehash.ReplaceAllString(s, `{{template "rehashBody" buildDict "Global" . "HasSel" $6 "HasNulls" $7}}`) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go index e55329bc1b07..541da081d779 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/hashtable_gen.go @@ -81,6 +81,8 @@ func genHashTable(inputFileContents string, wr io.Writer, htm hashTableMode) err "_RIGHT_TYPE_WIDTH", typeWidthReplacement, "_ProbeType", "{{.Left.VecMethod}}", "_BuildType", "{{.Right.VecMethod}}", + "_GLOBAL", "$global", + "_SELECT_DISTINCT", "$selectDistinct", "_USE_PROBE_SEL", ".UseProbeSel", "_PROBING_AGAINST_ITSELF", "$probingAgainstItself", "_DELETING_PROBE_MODE", "$deletingProbeMode", @@ -91,24 +93,19 @@ func genHashTable(inputFileContents string, wr io.Writer, htm hashTableMode) err assignNeRe := makeFunctionRegex("_ASSIGN_NE", 6) s = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall("Global.Right.Assign", 6)) - checkColBody := makeFunctionRegex("_CHECK_COL_BODY", 6) + checkColBody := makeFunctionRegex("_CHECK_COL_BODY", 7) s = checkColBody.ReplaceAllString(s, - `{{template "checkColBody" buildDict "Global" .Global "ProbeHasNulls" $1 "BuildHasNulls" $2 "SelectDistinct" $3 "UseProbeSel" $4 "ProbingAgainstItself" $5 "DeletingProbeMode" $6}}`, + `{{template "checkColBody" buildDict "Global" $1 "ProbeHasNulls" $2 "BuildHasNulls" $3 "SelectDistinct" $4 "UseProbeSel" $5 "ProbingAgainstItself" $6 "DeletingProbeMode" $7}}`, ) - checkColWithNulls := makeFunctionRegex("_CHECK_COL_WITH_NULLS", 3) + checkColWithNulls := makeFunctionRegex("_CHECK_COL_WITH_NULLS", 4) s = checkColWithNulls.ReplaceAllString(s, - `{{template "checkColWithNulls" buildDict "Global" . "UseProbeSel" $1 "ProbingAgainstItself" $2 "DeletingProbeMode" $3}}`, + `{{template "checkColWithNulls" buildDict "Global" . 
"SelectDistinct" $1 "UseProbeSel" $2 "ProbingAgainstItself" $3 "DeletingProbeMode" $4}}`, ) - checkColFunctionTemplate := makeFunctionRegex("_CHECK_COL_FUNCTION_TEMPLATE", 2) + checkColFunctionTemplate := makeFunctionRegex("_CHECK_COL_FUNCTION_TEMPLATE", 3) s = checkColFunctionTemplate.ReplaceAllString(s, - `{{template "checkColFunctionTemplate" buildDict "Global" . "ProbingAgainstItself" $1 "DeletingProbeMode" $2}}`, - ) - - checkColForDistinctWithNulls := makeFunctionRegex("_CHECK_COL_FOR_DISTINCT_WITH_NULLS", 1) - s = checkColForDistinctWithNulls.ReplaceAllString(s, - `{{template "checkColForDistinctWithNulls" buildDict "Global" . "UseProbeSel" $1}}`, + `{{template "checkColFunctionTemplate" buildDict "Global" . "SelectDistinct" $1 "ProbingAgainstItself" $2 "DeletingProbeMode" $3}}`, ) checkBody := makeFunctionRegex("_CHECK_BODY", 3) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go index 500ab1c6da1f..98b297977539 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_base.go @@ -271,6 +271,7 @@ type lastArgWidthOverload struct { AssignFunc assignFunc CompareFunc compareFunc + HashFunc hashFunc } // newLastArgWidthOverload creates a new lastArgWidthOverload. Note that it @@ -304,7 +305,7 @@ func (o *oneArgOverload) String() string { } // twoArgsResolvedOverload is a utility struct that represents an overload that -// takes it two arguments and that has been "resolved" (meaning it supports +// takes in two arguments and that has been "resolved" (meaning it supports // only a single type family and a single type width on both sides). type twoArgsResolvedOverload struct { *overloadBase @@ -312,6 +313,12 @@ type twoArgsResolvedOverload struct { Right *lastArgWidthOverload } +// NeedsBinaryOverloadHelper returns true iff the overload is such that it needs +// access to execgen.BinaryOverloadHelper. +func (o *twoArgsResolvedOverload) NeedsBinaryOverloadHelper() bool { + return o.kind == binaryOverload && o.Right.RetVecMethod == "Datum" +} + // twoArgsResolvedOverloadsInfo contains all overloads that take in two // arguments and stores them in a similar hierarchical structure to how // twoArgsOverloads are stored, with the difference that on the "bottom" level @@ -352,6 +359,7 @@ type twoArgsResolvedOverloadRightWidthInfo struct { type assignFunc func(op *lastArgWidthOverload, targetElem, leftElem, rightElem, targetCol, leftCol, rightCol string) string type compareFunc func(targetElem, leftElem, rightElem, leftCol, rightCol string) string type castFunc func(to, from, evalCtx, toType string) string +type hashFunc func(targetElem, vElem, vVec, vIdx string) string // Assign produces a Go source string that assigns the "targetElem" variable to // the result of applying the overload to the two inputs, "leftElem" and @@ -399,14 +407,8 @@ func (o *lastArgWidthOverload) Compare( leftElem, rightElem, targetElem, leftElem, rightElem, targetElem, targetElem) } -func (o *lastArgWidthOverload) UnaryAssign(targetElem, vElem, targetCol, vVec string) string { - if o.AssignFunc != nil { - if ret := o.AssignFunc(o, targetElem, vElem, "", targetCol, vVec, ""); ret != "" { - return ret - } - } - // Default assign form assumes a function operator. 
- return fmt.Sprintf("%s = %s(%s)", targetElem, o.overloadBase.OpStr, vElem) +func (o *lastArgWidthOverload) AssignHash(targetElem, vElem, vVec, vIdx string) string { + return o.HashFunc(targetElem, vElem, vVec, vIdx) } func goTypeSliceName(canonicalTypeFamily types.Family, width int32) string { @@ -575,7 +577,7 @@ if %[2]s != nil { %[1]s = %[2]s.Size() }`, target, value) case types.DecimalFamily: - return fmt.Sprintf(`%s := tree.SizeOfDecimal(&%s)`, target, value) + return fmt.Sprintf(`%s := %s.Size()`, target, value) case typeconv.DatumVecCanonicalTypeFamily: return fmt.Sprintf(` var %[1]s uintptr @@ -597,8 +599,8 @@ func (b *argWidthOverloadBase) SetVariableSize(target, value string) string { var ( lawo = &lastArgWidthOverload{} _ = lawo.Assign + _ = lawo.AssignHash _ = lawo.Compare - _ = lawo.UnaryAssign awob = &argWidthOverloadBase{} _ = awob.GoTypeSliceName diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go index 3399c6ca1ac8..1b0ffac90e28 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_bin.go @@ -472,10 +472,10 @@ func (c intCustomizer) getBinOpAssignFunc() assignFunc { if {{.Right}} == 0 { colexecerror.ExpectedError(tree.ErrDivByZero) } - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64({{.Left}})) rightTmpDec.SetInt64(int64({{.Right}})) - if _, err := tree.{{.Ctx}}.Quo(&{{.Target}}, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.{{.Ctx}}.Quo(&{{.Target}}, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -485,10 +485,10 @@ func (c intCustomizer) getBinOpAssignFunc() assignFunc { t = template.Must(template.New("").Parse(` { - leftTmpDec, rightTmpDec := &_overloadHelper.TmpDec1, &_overloadHelper.TmpDec2 + var leftTmpDec, rightTmpDec apd.Decimal //gcassert:noescape leftTmpDec.SetInt64(int64({{.Left}})) rightTmpDec.SetInt64(int64({{.Right}})) - if _, err := tree.{{.Ctx}}.Pow(leftTmpDec, leftTmpDec, rightTmpDec); err != nil { + if _, err := tree.{{.Ctx}}.Pow(&leftTmpDec, &leftTmpDec, &rightTmpDec); err != nil { colexecerror.ExpectedError(err) } resultInt, err := leftTmpDec.Int64() @@ -550,9 +550,9 @@ func (c decimalIntCustomizer) getBinOpAssignFunc() assignFunc { colexecerror.ExpectedError(tree.ErrDivByZero) } {{end}} - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64({{.Right}})) - if _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, &{{.Left}}, tmpDec); err != nil { + if _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, &{{.Left}}, &tmpDec); err != nil { colexecerror.ExpectedError(err) } } @@ -583,9 +583,9 @@ func (c intDecimalCustomizer) getBinOpAssignFunc() assignFunc { colexecerror.ExpectedError(tree.ErrDivByZero) } {{end}} - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64({{.Left}})) - _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, tmpDec, &{{.Right}}) + _, err := tree.{{.Ctx}}.{{.Op}}(&{{.Target}}, &tmpDec, &{{.Right}}) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go index b1ebb1ef0bae..abcfa7daf828 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_cmp.go @@ -222,11 +222,11 @@ func (c 
decimalFloatCustomizer) getCmpOpCompareFunc() compareFunc { buf := strings.Builder{} t := template.Must(template.New("").Parse(` { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64({{.Right}})); err != nil { colexecerror.ExpectedError(err) } - {{.Target}} = tree.CompareDecimals(&{{.Left}}, tmpDec) + {{.Target}} = tree.CompareDecimals(&{{.Left}}, &tmpDec) } `)) if err := t.Execute(&buf, args); err != nil { @@ -242,9 +242,9 @@ func (c decimalIntCustomizer) getCmpOpCompareFunc() compareFunc { buf := strings.Builder{} t := template.Must(template.New("").Parse(` { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64({{.Right}})) - {{.Target}} = tree.CompareDecimals(&{{.Left}}, tmpDec) + {{.Target}} = tree.CompareDecimals(&{{.Left}}, &tmpDec) } `)) if err := t.Execute(&buf, args); err != nil { @@ -260,11 +260,11 @@ func (c floatDecimalCustomizer) getCmpOpCompareFunc() compareFunc { buf := strings.Builder{} t := template.Must(template.New("").Parse(` { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape if _, err := tmpDec.SetFloat64(float64({{.Left}})); err != nil { colexecerror.ExpectedError(err) } - {{.Target}} = tree.CompareDecimals(tmpDec, &{{.Right}}) + {{.Target}} = tree.CompareDecimals(&tmpDec, &{{.Right}}) } `)) if err := t.Execute(&buf, args); err != nil { @@ -280,9 +280,9 @@ func (c intDecimalCustomizer) getCmpOpCompareFunc() compareFunc { buf := strings.Builder{} t := template.Must(template.New("").Parse(` { - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.SetInt64(int64({{.Left}})) - {{.Target}} = tree.CompareDecimals(tmpDec, &{{.Right}}) + {{.Target}} = tree.CompareDecimals(&tmpDec, &{{.Right}}) } `)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/overloads_hash.go b/pkg/sql/colexec/execgen/cmd/execgen/overloads_hash.go index c1448a22eac9..b0058c7add09 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/overloads_hash.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/overloads_hash.go @@ -33,8 +33,6 @@ func populateHashOverloads() { } ov := newLastArgTypeOverload(hashOverloadBase, family) for _, width := range widths { - // Note that we pass in types.Bool as the return type just to make - // // Note that we pass in types.Bool as the return type just to make // overloads initialization happy. We don't actually care about the // return type since we know that it will be represented physically @@ -43,7 +41,7 @@ func populateHashOverloads() { sameTypeCustomizer := typeCustomizers[typePair{family, width, family, width}] if sameTypeCustomizer != nil { if b, ok := sameTypeCustomizer.(hashTypeCustomizer); ok { - lawo.AssignFunc = b.getHashAssignFunc() + lawo.HashFunc = b.getHashFunc() } } } @@ -56,11 +54,11 @@ func populateHashOverloads() { // hashTypeCustomizer is a type customizer that changes how the templater // produces hash output for a particular type. 
type hashTypeCustomizer interface { - getHashAssignFunc() assignFunc + getHashFunc() hashFunc } -func (boolCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (boolCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(` x := 0 if %[2]s { @@ -80,26 +78,26 @@ const hashByteSliceString = ` %[1]s = memhash(unsafe.Pointer(sh.Data), %[1]s, uintptr(len(%[2]s))) ` -func (bytesCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (bytesCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(hashByteSliceString, targetElem, vElem) } } -func (decimalCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (decimalCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(` // In order for equal decimals to hash to the same value we need to // remove the trailing zeroes if there are any. - tmpDec := &_overloadHelper.TmpDec1 + var tmpDec apd.Decimal //gcassert:noescape tmpDec.Reduce(&%[1]s) b := []byte(tmpDec.String())`, vElem) + fmt.Sprintf(hashByteSliceString, targetElem, "b") } } -func (c floatCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (c floatCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { // TODO(yuzefovich): think through whether this is appropriate way to hash // NaNs. return fmt.Sprintf( @@ -113,8 +111,8 @@ func (c floatCustomizer) getHashAssignFunc() assignFunc { } } -func (c intCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (c intCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(` // In order for integers with different widths but of the same value to // to hash to the same value, we upcast all of them to int64. @@ -124,8 +122,8 @@ func (c intCustomizer) getHashAssignFunc() assignFunc { } } -func (c timestampCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (c timestampCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(` s := %[2]s.UnixNano() %[1]s = memhash64(noescape(unsafe.Pointer(&s)), %[1]s) @@ -133,8 +131,8 @@ func (c timestampCustomizer) getHashAssignFunc() assignFunc { } } -func (c intervalCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (c intervalCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { return fmt.Sprintf(` months, days, nanos := %[2]s.Months, %[2]s.Days, %[2]s.Nanos() %[1]s = memhash64(noescape(unsafe.Pointer(&months)), %[1]s) @@ -144,27 +142,20 @@ func (c intervalCustomizer) getHashAssignFunc() assignFunc { } } -func (c jsonCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { - // TODO(yuzefovich): consider refactoring this to avoid decoding-encoding of - // JSON altogether. 
This will require changing `assignFunc` to also have an - // access to the index of the current element and then some trickery to get - // to the bytes underlying the JSON. +func (c jsonCustomizer) getHashFunc() hashFunc { + return func(targetElem, _, vVec, vIdx string) string { return fmt.Sprintf(` - scratch := _overloadHelper.ByteScratch[:0] - _b, _err := json.EncodeJSON(scratch, %[2]s) - if _err != nil { - colexecerror.ExpectedError(_err) - } - _overloadHelper.ByteScratch = _b - %[1]s`, fmt.Sprintf(hashByteSliceString, targetElem, "_b"), vElem) + // Access the underlying []byte directly which allows us to skip + // decoding-encoding of the JSON object. + _b := %[2]s.Bytes.Get(%[3]s) + %[1]s`, fmt.Sprintf(hashByteSliceString, targetElem, "_b"), vVec, vIdx) } } -func (c datumCustomizer) getHashAssignFunc() assignFunc { - return func(op *lastArgWidthOverload, targetElem, vElem, _, _, _, _ string) string { +func (c datumCustomizer) getHashFunc() hashFunc { + return func(targetElem, vElem, _, _ string) string { // Note that this overload assumes that there exists - // var datumAlloc *rowenc.DatumAlloc. + // var datumAlloc *tree.DatumAlloc. // in the scope. return fmt.Sprintf(`b := coldataext.Hash(%s.(tree.Datum), datumAlloc)`, vElem) + fmt.Sprintf(hashByteSliceString, targetElem, "b") diff --git a/pkg/sql/colexec/execgen/cmd/execgen/span_encoder_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/span_encoder_gen.go index a164ccfbe5e7..b0f7e238b384 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/span_encoder_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/span_encoder_gen.go @@ -153,7 +153,7 @@ func (info spanEncoderTmplInfo) AssignSpanEncoding(appendTo, valToEncode string) valToEncode += ".(tree.Datum)" return fmt.Sprintf(` var err error - %[1]s, err = rowenc.EncodeTableKey(%[1]s, %[2]s, %[3]s) + %[1]s, err = keyside.Encode(%[1]s, %[2]s, %[3]s) if err != nil { colexecerror.ExpectedError(err) } diff --git a/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go index 9eeebc20702a..1e4002fe49d0 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go @@ -25,7 +25,6 @@ import ( type sumAggTmplInfo struct { aggTmplInfoBase SumKind string - NeedsHelper bool InputVecMethod string RetGoType string RetGoTypeSlice string @@ -165,7 +164,6 @@ func genSumAgg(inputFileContents string, wr io.Writer, isSumInt bool) error { TypeFamily: toString(inputTypeFamily), } for _, inputTypeWidth := range supportedWidthsByCanonicalTypeFamily[inputTypeFamily] { - needsHelper := false // Note that we don't use execinfrapb.GetAggregateInfo because we don't // want to bring in a dependency on that package to reduce the burden // of regenerating execgen code when the protobufs get generated. @@ -174,9 +172,6 @@ func genSumAgg(inputFileContents string, wr io.Writer, isSumInt bool) error { if isSumInt { retTypeFamily, retTypeWidth = types.IntFamily, anyWidth } else { - // Non-integer summation of integers needs a helper because - // the result is a decimal. 
- needsHelper = true retTypeFamily, retTypeWidth = types.DecimalFamily, anyWidth } } @@ -187,7 +182,6 @@ func genSumAgg(inputFileContents string, wr io.Writer, isSumInt bool) error { canonicalTypeFamily: typeconv.TypeFamilyToCanonicalTypeFamily(retTypeFamily), }, SumKind: sumKind, - NeedsHelper: needsHelper, InputVecMethod: toVecMethod(inputTypeFamily, inputTypeWidth), RetGoType: toPhysicalRepresentation(retTypeFamily, retTypeWidth), RetGoTypeSlice: goTypeSliceName(retTypeFamily, retTypeWidth), diff --git a/pkg/sql/colexec/execgen/cmd/execgen/vec_to_datum_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/vec_to_datum_gen.go index cf23257023de..2b40e2c377bd 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/vec_to_datum_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/vec_to_datum_gen.go @@ -43,7 +43,7 @@ type vecToDatumWidthTmplInfo struct { // AssignConverted returns a string that performs a conversion of the element // sourceElem and assigns the result to the newly declared targetElem. -// datumAlloc is the name of *rowenc.DatumAlloc struct that can be used to +// datumAlloc is the name of *tree.DatumAlloc struct that can be used to // allocate new datums. func (i vecToDatumWidthTmplInfo) AssignConverted(targetElem, sourceElem, datumAlloc string) string { return fmt.Sprintf(i.ConversionTmpl, targetElem, sourceElem, datumAlloc) @@ -67,14 +67,10 @@ var vecToDatumConversionTmpls = map[types.Family]string{ types.BoolFamily: `%[1]s := tree.MakeDBool(tree.DBool(%[2]s))`, // Note that currently, regardless of the integer's width, we always return // INT8, so there is a single conversion template for IntFamily. - types.IntFamily: `%[1]s := %[3]s.NewDInt(tree.DInt(%[2]s))`, - types.FloatFamily: `%[1]s := %[3]s.NewDFloat(tree.DFloat(%[2]s))`, - types.DecimalFamily: ` %[1]s := %[3]s.NewDDecimal(tree.DDecimal{Decimal: %[2]s}) - // Clear the Coeff so that the Set below allocates a new slice for the - // Coeff.abs field. - %[1]s.Coeff = big.Int{} - %[1]s.Coeff.Set(&%[2]s.Coeff)`, - types.DateFamily: `%[1]s := %[3]s.NewDDate(tree.DDate{Date: pgdate.MakeCompatibleDateFromDisk(%[2]s)})`, + types.IntFamily: `%[1]s := %[3]s.NewDInt(tree.DInt(%[2]s))`, + types.FloatFamily: `%[1]s := %[3]s.NewDFloat(tree.DFloat(%[2]s))`, + types.DecimalFamily: `%[1]s := %[3]s.NewDDecimal(tree.DDecimal{Decimal: %[2]s})`, + types.DateFamily: `%[1]s := %[3]s.NewDDate(tree.DDate{Date: pgdate.MakeCompatibleDateFromDisk(%[2]s)})`, types.BytesFamily: `// Note that there is no need for a copy since DBytes uses a string // as underlying storage, which will perform the copy for us. %[1]s := %[3]s.NewDBytes(tree.DBytes(%[2]s))`, diff --git a/pkg/sql/colexec/execgen/overloads_util.go b/pkg/sql/colexec/execgen/overloads_bin_util.go similarity index 51% rename from pkg/sql/colexec/execgen/overloads_util.go rename to pkg/sql/colexec/execgen/overloads_bin_util.go index 26f52c351499..bec5a1ef73be 100644 --- a/pkg/sql/colexec/execgen/overloads_util.go +++ b/pkg/sql/colexec/execgen/overloads_bin_util.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Cockroach Authors. +// Copyright 2022 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. 
@@ -10,21 +10,15 @@ package execgen -import ( - "github.com/cockroachdb/apd/v2" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" -) +import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" -// OverloadHelper is a utility struct used for templates that helps us avoid -// allocations of temporary decimals on every overloaded operation with them as -// well as plumbs through other useful information. +// BinaryOverloadHelper is a utility struct used for templates of the binary +// overloads that fall back to the row-based tree.Datum computation. // // In order for the templates to see it correctly, a local variable named // `_overloadHelper` of this type must be declared before the inlined // overloaded code. -type OverloadHelper struct { - TmpDec1, TmpDec2 apd.Decimal - BinFn tree.TwoArgFn - EvalCtx *tree.EvalContext - ByteScratch []byte +type BinaryOverloadHelper struct { + BinFn tree.TwoArgFn + EvalCtx *tree.EvalContext } diff --git a/pkg/sql/colexec/hash_aggregator.go b/pkg/sql/colexec/hash_aggregator.go index c675dfec5441..f304c8f2799a 100644 --- a/pkg/sql/colexec/hash_aggregator.go +++ b/pkg/sql/colexec/hash_aggregator.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" ) @@ -149,7 +149,7 @@ type hashAggregator struct { aggFnsAlloc *colexecagg.AggregateFuncsAlloc hashAlloc aggBucketAlloc - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc toClose colexecop.Closers // Distincter finds distinct groups in partially sorted input columns. diff --git a/pkg/sql/colexec/hashjoiner_test.go b/pkg/sql/colexec/hashjoiner_test.go index ed6fc406bd2c..39bb753db92b 100644 --- a/pkg/sql/colexec/hashjoiner_test.go +++ b/pkg/sql/colexec/hashjoiner_test.go @@ -15,7 +15,7 @@ import ( "fmt" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" diff --git a/pkg/sql/colexec/joiner_utils_test.go b/pkg/sql/colexec/joiner_utils_test.go index 23ef63950bfd..dd8b1d9222d8 100644 --- a/pkg/sql/colexec/joiner_utils_test.go +++ b/pkg/sql/colexec/joiner_utils_test.go @@ -14,7 +14,7 @@ import ( "fmt" "strings" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils" diff --git a/pkg/sql/colexec/not_expr_ops.go b/pkg/sql/colexec/not_expr_ops.go new file mode 100644 index 000000000000..2a9c3022d57b --- /dev/null +++ b/pkg/sql/colexec/not_expr_ops.go @@ -0,0 +1,208 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
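// [Editor's aside, not part of the patch] A minimal sketch of how the inlined
// templated code is assumed to use the `_overloadHelper` local once it has the
// new execgen.BinaryOverloadHelper type: BinFn carries the row-based
// tree.TwoArgFn, so the datum branch falls back to tree.Datum computation.
// applyBinaryOverload and its argument names are hypothetical.
func applyBinaryOverload(
	helper execgen.BinaryOverloadHelper, left, right tree.Datum,
) tree.Datum {
	// The templates require a local named _overloadHelper so that the inlined
	// code can reference BinFn and EvalCtx.
	_overloadHelper := helper
	res, err := _overloadHelper.BinFn(_overloadHelper.EvalCtx, left, right)
	if err != nil {
		colexecerror.ExpectedError(err)
	}
	return res
}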
+ +package colexec + +import ( + "github.com/cockroachdb/cockroach/pkg/col/coldata" + "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils" + "github.com/cockroachdb/cockroach/pkg/sql/colexecop" + "github.com/cockroachdb/cockroach/pkg/sql/colmem" + "github.com/cockroachdb/cockroach/pkg/sql/types" +) + +type notExprProjBase struct { + colexecop.OneInputHelper + allocator *colmem.Allocator + inputIdx int + outputIdx int +} + +// notExprProjOp is an Operator that projects into the outputIdx Vec +// the negation of the boolean value in the inputIdx Vec (i.e. the result of +// NOT applied to the value). If the underlying value is NULL, then notExprProjOp +// projects a NULL value in the output. +type notExprProjOp struct { + notExprProjBase +} + +var _ colexecop.Operator = &notExprProjOp{} + +// NewNotExprProjOp returns a new notExprProjOp. +func NewNotExprProjOp( + allocator *colmem.Allocator, input colexecop.Operator, inputIdx, outputIdx int, +) colexecop.Operator { + input = colexecutils.NewVectorTypeEnforcer(allocator, input, types.Bool, outputIdx) + base := notExprProjBase{ + OneInputHelper: colexecop.MakeOneInputHelper(input), + allocator: allocator, + inputIdx: inputIdx, + outputIdx: outputIdx, + } + return &notExprProjOp{notExprProjBase: base} +} + +// projectOutput populates the output vector with the negated bool value of the +// input vector for non-null values. For null values, it projects null. +func projectOutput( + idx int, inputNulls, outputNulls *coldata.Nulls, inputVec coldata.Vec, outputBools coldata.Bools, +) { + if inputNulls.NullAt(idx) { + outputNulls.SetNull(idx) + } else { + inputVal := inputVec.Bool().Get(idx) + outputBools.Set(idx, !inputVal) + } +} + +func (o *notExprProjOp) Next() coldata.Batch { + batch := o.Input.Next() + n := batch.Length() + if n == 0 { + return coldata.ZeroBatch + } + inputVec, outputVec := batch.ColVec(o.inputIdx), batch.ColVec(o.outputIdx) + inputBools, inputNulls := inputVec.Bool(), inputVec.Nulls() + outputBools, outputNulls := outputVec.Bool(), outputVec.Nulls() + if outputNulls.MaybeHasNulls() { + // Unsetting any potential nulls in the output in case there are null + // values present beforehand. + outputNulls.UnsetNulls() + } + if inputNulls.MaybeHasNulls() { + if sel := batch.Selection(); sel != nil { + sel = sel[:n] + for _, idx := range sel { + if inputNulls.NullAt(idx) { + outputNulls.SetNull(idx) + } else { + exprVal := inputBools.Get(idx) + outputBools.Set(idx, !exprVal) + } + } + } else { + inputBools = inputBools[:n] + outputBools = outputBools[:n] + for idx := 0; idx < n; idx++ { + if inputNulls.NullAt(idx) { + outputNulls.SetNull(idx) + } else { + //gcassert:bce + exprVal := inputBools.Get(idx) + //gcassert:bce + outputBools.Set(idx, !exprVal) + } + } + } + } else { + if sel := batch.Selection(); sel != nil { + sel = sel[:n] + for _, idx := range sel { + exprVal := inputBools.Get(idx) + outputBools.Set(idx, !exprVal) + } + } else { + inputBools = inputBools[:n] + outputBools = outputBools[:n] + for idx := 0; idx < n; idx++ { + //gcassert:bce + exprVal := inputBools.Get(idx) + //gcassert:bce + outputBools.Set(idx, !exprVal) + } + } + + } + return batch +} + +type notExprSelBase struct { + colexecop.OneInputHelper + inputIdx int +} + +// notExprSelOp is an Operator that selects all the values in the input vector +// where the expression evaluates to FALSE (i.e. NOT of the expression evaluates +// to TRUE). If the input value is NULL, then that value is not selected.
+type notExprSelOp struct {
+	notExprSelBase
+}
+
+var _ colexecop.Operator = &notExprSelOp{}
+
+// NewNotExprSelOp returns a new notExprSelOp.
+func NewNotExprSelOp(input colexecop.Operator, inputIdx int) colexecop.Operator {
+	base := notExprSelBase{
+		OneInputHelper: colexecop.MakeOneInputHelper(input),
+		inputIdx:       inputIdx,
+	}
+	return &notExprSelOp{notExprSelBase: base}
+}
+
+func (o *notExprSelOp) Next() coldata.Batch {
+	for {
+		batch := o.Input.Next()
+		n := batch.Length()
+		if n == 0 {
+			return batch
+		}
+		inputVec, selectedValuesIdx := batch.ColVec(o.inputIdx), 0
+		inputNulls, inputBools := inputVec.Nulls(), inputVec.Bool()
+		if inputNulls.MaybeHasNulls() {
+			if sel := batch.Selection(); sel != nil {
+				sel = sel[:n]
+				for _, idx := range sel {
+					if !inputNulls.NullAt(idx) && !inputBools.Get(idx) {
+						sel[selectedValuesIdx] = idx
+						selectedValuesIdx++
+					}
+				}
+			} else {
+				batch.SetSelection(true)
+				sel = batch.Selection()[:n]
+				inputBools = inputBools[:n]
+				for idx := 0; idx < n; idx++ {
+					if !inputNulls.NullAt(idx) {
+						//gcassert:bce
+						if !inputBools.Get(idx) {
+							sel[selectedValuesIdx] = idx
+							selectedValuesIdx++
+						}
+					}
+				}
+			}
+		} else {
+			if sel := batch.Selection(); sel != nil {
+				sel = sel[:n]
+				for _, idx := range sel {
+					if !inputBools.Get(idx) {
+						sel[selectedValuesIdx] = idx
+						selectedValuesIdx++
+					}
+				}
+			} else {
+				batch.SetSelection(true)
+				sel = batch.Selection()[:n]
+				inputBools = inputBools[:n]
+				for idx := 0; idx < n; idx++ {
+					//gcassert:bce
+					if !inputBools.Get(idx) {
+						sel[selectedValuesIdx] = idx
+						selectedValuesIdx++
+					}
+				}
+			}
+		}
+		if selectedValuesIdx > 0 {
+			batch.SetLength(selectedValuesIdx)
+			return batch
+		}
+	}
+}
diff --git a/pkg/sql/colexec/not_expr_ops_test.go b/pkg/sql/colexec/not_expr_ops_test.go
new file mode 100644
index 000000000000..5b61f5cc8ecc
--- /dev/null
+++ b/pkg/sql/colexec/not_expr_ops_test.go
@@ -0,0 +1,164 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
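The selection operator above filters in place: surviving row indices are written back into the front of the batch's selection vector and the batch length is shrunk to the number of survivors. A slice-based sketch of that pattern (plain Go slices standing in for the coldata vectors; illustration only, not part of the diff):

```
package main

import "fmt"

// compactSel keeps only the indices whose value is FALSE and non-NULL,
// overwriting the front of sel in place, the same way notExprSelOp reuses
// the batch's selection vector instead of allocating a new one.
func compactSel(sel []int, vals, nulls []bool) []int {
	kept := 0
	for _, idx := range sel {
		if !nulls[idx] && !vals[idx] {
			sel[kept] = idx
			kept++
		}
	}
	return sel[:kept]
}

func main() {
	vals := []bool{true, false, false, true}
	nulls := []bool{false, false, true, false}
	sel := []int{0, 1, 2, 3}
	fmt.Println(compactSel(sel, vals, nulls)) // [1]: only row 1 is non-NULL FALSE
}
```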
+ +package colexec + +import ( + "context" + "testing" + + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexectestutils" + "github.com/cockroachdb/cockroach/pkg/sql/colexecop" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" +) + +func TestNotExprProjOp(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + evalCtx := tree.MakeTestingEvalContext(st) + defer evalCtx.Stop(ctx) + + testCases := []struct { + desc string + inputTuples colexectestutils.Tuples + outputTuples colexectestutils.Tuples + projExpr string + }{ + { + desc: "SELECT NOT c FROM t -- NOT expr with no NULL", + inputTuples: colexectestutils.Tuples{{true}, {false}, {true}, {false}}, + outputTuples: colexectestutils.Tuples{{true, false}, {false, true}, {true, false}, {false, true}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with only TRUE", + inputTuples: colexectestutils.Tuples{{true}, {true}, {true}, {true}}, + outputTuples: colexectestutils.Tuples{{true, false}, {true, false}, {true, false}, {true, false}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with only FALSE", + inputTuples: colexectestutils.Tuples{{false}, {false}, {false}, {false}}, + outputTuples: colexectestutils.Tuples{{false, true}, {false, true}, {false, true}, {false, true}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with only NULL", + inputTuples: colexectestutils.Tuples{{nil}, {nil}, {nil}, {nil}}, + outputTuples: colexectestutils.Tuples{{nil, nil}, {nil, nil}, {nil, nil}, {nil, nil}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with NULL and only FALSE", + inputTuples: colexectestutils.Tuples{{nil}, {false}, {nil}, {false}}, + outputTuples: colexectestutils.Tuples{{nil, nil}, {false, true}, {nil, nil}, {false, true}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with NULL and only TRUE", + inputTuples: colexectestutils.Tuples{{nil}, {true}, {nil}, {true}}, + outputTuples: colexectestutils.Tuples{{nil, nil}, {true, false}, {nil, nil}, {true, false}}, + projExpr: "NOT", + }, + { + desc: "SELECT NOT c FROM t -- NOT expr with NULL and both BOOL", + inputTuples: colexectestutils.Tuples{{nil}, {true}, {nil}, {false}}, + outputTuples: colexectestutils.Tuples{{nil, nil}, {true, false}, {nil, nil}, {false, true}}, + projExpr: "NOT", + }, + } + + for _, c := range testCases { + log.Infof(ctx, "%s", c.desc) + opConstructor := func(input []colexecop.Operator) (colexecop.Operator, error) { + return NewNotExprProjOp(testAllocator, input[0], 0, 1), nil + } + colexectestutils.RunTestsWithoutAllNullsInjection(t, testAllocator, []colexectestutils.Tuples{c.inputTuples}, [][]*types.T{{types.Bool}}, c.outputTuples, colexectestutils.OrderedVerifier, opConstructor) + } +} + +func TestNotExprSelOp(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + evalCtx := tree.MakeTestingEvalContext(st) + defer evalCtx.Stop(ctx) + + testCases := []struct { + desc string + inputTuples colexectestutils.Tuples + outputTuples colexectestutils.Tuples + selExpr string + }{ + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with no NULL", + 
inputTuples: colexectestutils.Tuples{{true}, {false}, {true}, {false}}, + outputTuples: colexectestutils.Tuples{{false}, {false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with only FALSE", + inputTuples: colexectestutils.Tuples{{false}, {false}, {false}, {false}}, + outputTuples: colexectestutils.Tuples{{false}, {false}, {false}, {false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with only TRUE", + inputTuples: colexectestutils.Tuples{{true}, {true}, {true}, {true}}, + outputTuples: colexectestutils.Tuples{}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with only one FALSE and rest TRUE", + inputTuples: colexectestutils.Tuples{{true}, {false}, {true}, {true}}, + outputTuples: colexectestutils.Tuples{{false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with only one TRUE and rest FALSE", + inputTuples: colexectestutils.Tuples{{false}, {true}, {false}, {false}}, + outputTuples: colexectestutils.Tuples{{false}, {false}, {false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with FALSE and NULL", + inputTuples: colexectestutils.Tuples{{nil}, {nil}, {false}, {nil}}, + outputTuples: colexectestutils.Tuples{{false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with TRUE, FALSE and NULL", + inputTuples: colexectestutils.Tuples{{false}, {true}, {false}, {nil}}, + outputTuples: colexectestutils.Tuples{{false}, {false}}, + selExpr: "NOT", + }, + { + desc: "SELECT c FROM t WHERE NOT c -- NOT expr with only NULL", + inputTuples: colexectestutils.Tuples{{nil}, {nil}, {nil}, {nil}}, + outputTuples: colexectestutils.Tuples{}, + selExpr: "NOT", + }, + } + + for _, c := range testCases { + log.Infof(ctx, "%s", c.desc) + opConstructor := func(sources []colexecop.Operator) (colexecop.Operator, error) { + return NewNotExprSelOp(sources[0], 0), nil + } + colexectestutils.RunTestsWithoutAllNullsInjection(t, testAllocator, []colexectestutils.Tuples{c.inputTuples}, [][]*types.T{{types.Bool}}, c.outputTuples, colexectestutils.OrderedVerifier, opConstructor) + } +} diff --git a/pkg/sql/colexec/ordered_aggregator.go b/pkg/sql/colexec/ordered_aggregator.go index 7ab8ff6bec00..d5e59fc8b401 100644 --- a/pkg/sql/colexec/ordered_aggregator.go +++ b/pkg/sql/colexec/ordered_aggregator.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colexecop" "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) @@ -133,7 +133,7 @@ type orderedAggregator struct { // seenNonEmptyBatch indicates whether a non-empty input batch has been // observed. 
seenNonEmptyBatch bool - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc toClose colexecop.Closers } diff --git a/pkg/sql/colexec/rowstovec.eg.go b/pkg/sql/colexec/rowstovec.eg.go index ffe0d2028bee..ceecfbf9a60a 100644 --- a/pkg/sql/colexec/rowstovec.eg.go +++ b/pkg/sql/colexec/rowstovec.eg.go @@ -12,7 +12,7 @@ package colexec import ( "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colmem" @@ -42,7 +42,7 @@ func EncDatumRowsToColVec( vec coldata.Vec, columnIdx int, t *types.T, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, ) error { var err error allocator.PerformOperation( diff --git a/pkg/sql/colexec/rowstovec_test.go b/pkg/sql/colexec/rowstovec_test.go index a8266586cf21..c44412177303 100644 --- a/pkg/sql/colexec/rowstovec_test.go +++ b/pkg/sql/colexec/rowstovec_test.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" ) -var alloc = rowenc.DatumAlloc{} +var alloc = tree.DatumAlloc{} func TestEncDatumRowsToColVecBool(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/sql/colexec/rowstovec_tmpl.go b/pkg/sql/colexec/rowstovec_tmpl.go index ce6ac4e19d0f..b5d0ca5db908 100644 --- a/pkg/sql/colexec/rowstovec_tmpl.go +++ b/pkg/sql/colexec/rowstovec_tmpl.go @@ -22,7 +22,7 @@ package colexec import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" "github.com/cockroachdb/cockroach/pkg/sql/colmem" @@ -47,7 +47,7 @@ var ( // {{/* func _ROWS_TO_COL_VEC( - rows rowenc.EncDatumRows, vec coldata.Vec, columnIdx int, alloc *rowenc.DatumAlloc, + rows rowenc.EncDatumRows, vec coldata.Vec, columnIdx int, alloc *tree.DatumAlloc, ) { // */}} // {{define "rowsToColVec" -}} col := vec.TemplateType() @@ -96,7 +96,7 @@ func EncDatumRowsToColVec( vec coldata.Vec, columnIdx int, t *types.T, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, ) error { var err error allocator.PerformOperation( diff --git a/pkg/sql/colexec/select_in.eg.go b/pkg/sql/colexec/select_in.eg.go index 888980d976be..2d230b7c895e 100644 --- a/pkg/sql/colexec/select_in.eg.go +++ b/pkg/sql/colexec/select_in.eg.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/select_in_tmpl.go b/pkg/sql/colexec/select_in_tmpl.go index 954547a70237..15fd9b256e9e 100644 --- a/pkg/sql/colexec/select_in_tmpl.go +++ b/pkg/sql/colexec/select_in_tmpl.go @@ -22,7 +22,7 @@ package colexec import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/sort_partitioner.eg.go b/pkg/sql/colexec/sort_partitioner.eg.go index 5b3c7dc2f5e8..266afff0d763 100644 --- a/pkg/sql/colexec/sort_partitioner.eg.go +++ b/pkg/sql/colexec/sort_partitioner.eg.go @@ -15,7 +15,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" 
"github.com/cockroachdb/cockroach/pkg/col/typeconv" diff --git a/pkg/sql/colexec/tuple_proj_op.go b/pkg/sql/colexec/tuple_proj_op.go index f52f9d7f4713..b2abcb6c323b 100644 --- a/pkg/sql/colexec/tuple_proj_op.go +++ b/pkg/sql/colexec/tuple_proj_op.go @@ -69,23 +69,34 @@ func (t *tupleProjOp) Next() coldata.Batch { // output vector. projVec.Nulls().UnsetNulls() } + t.allocator.PerformOperation([]coldata.Vec{projVec}, func() { + // Preallocate the tuples and their underlying datums in a contiguous + // slice to reduce allocations. + tuples := make([]tree.DTuple, n) + l := len(t.tupleContentsIdxs) + datums := make(tree.Datums, n*l) projCol := projVec.Datum() + projectInto := func(dst, src int) { + tuples[src] = tree.MakeDTuple( + t.outputType, datums[src*l:(src+1)*l:(src+1)*l]..., + ) + projCol.Set(dst, t.projectInto(&tuples[src], src)) + } if sel := batch.Selection(); sel != nil { for convertedIdx, i := range sel[:n] { - projCol.Set(i, t.createTuple(convertedIdx)) + projectInto(i, convertedIdx) } } else { for i := 0; i < n; i++ { - projCol.Set(i, t.createTuple(i)) + projectInto(i, i) } } }) return batch } -func (t *tupleProjOp) createTuple(convertedIdx int) tree.Datum { - tuple := tree.NewDTupleWithLen(t.outputType, len(t.tupleContentsIdxs)) +func (t *tupleProjOp) projectInto(tuple *tree.DTuple, convertedIdx int) tree.Datum { for i, columnIdx := range t.tupleContentsIdxs { tuple.D[i] = t.converter.GetDatumColumn(columnIdx)[convertedIdx] } diff --git a/pkg/sql/colexec/types_integration_test.go b/pkg/sql/colexec/types_integration_test.go index 0509b0ffe382..a831eea54535 100644 --- a/pkg/sql/colexec/types_integration_test.go +++ b/pkg/sql/colexec/types_integration_test.go @@ -56,7 +56,7 @@ func TestSQLTypesIntegration(t *testing.T) { DiskMonitor: diskMonitor, } - var da rowenc.DatumAlloc + var da tree.DatumAlloc rng, _ := randutil.NewTestRand() typesToTest := 20 diff --git a/pkg/sql/colexec/values.go b/pkg/sql/colexec/values.go index c4f23a135fc7..99b27a7002a7 100644 --- a/pkg/sql/colexec/values.go +++ b/pkg/sql/colexec/values.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/colmem" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) @@ -36,7 +37,7 @@ type valuesOp struct { data [][]byte allocator *colmem.Allocator - dalloc rowenc.DatumAlloc + dalloc tree.DatumAlloc batch coldata.Batch rowsBuf rowenc.EncDatumRows } diff --git a/pkg/sql/colexecop/BUILD.bazel b/pkg/sql/colexecop/BUILD.bazel index 73f3a4b28cab..784641baa6db 100644 --- a/pkg/sql/colexecop/BUILD.bazel +++ b/pkg/sql/colexecop/BUILD.bazel @@ -26,6 +26,7 @@ go_test( size = "small", srcs = ["dep_test.go"], embed = [":colexecop"], + tags = ["no-remote"], deps = [ "//pkg/testutils/buildutil", "//pkg/util/leaktest", diff --git a/pkg/sql/colfetcher/BUILD.bazel b/pkg/sql/colfetcher/BUILD.bazel index 3ab245fefc10..2a2763e6d2d9 100644 --- a/pkg/sql/colfetcher/BUILD.bazel +++ b/pkg/sql/colfetcher/BUILD.bazel @@ -17,8 +17,10 @@ go_library( "//pkg/col/typeconv", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvclient/kvstreamer", "//pkg/roachpb:with-mocks", "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", @@ -36,6 +38,7 @@ go_library( "//pkg/sql/physicalplan", "//pkg/sql/row", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/rowinfra", 
"//pkg/sql/scrub", "//pkg/sql/sem/tree", @@ -47,7 +50,7 @@ go_library( "//pkg/util/mon", "//pkg/util/syncutil", "//pkg/util/tracing", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/colfetcher/cfetcher.go b/pkg/sql/colfetcher/cfetcher.go index 05a0898c7a88..aa7956c4685e 100644 --- a/pkg/sql/colfetcher/cfetcher.go +++ b/pkg/sql/colfetcher/cfetcher.go @@ -19,12 +19,14 @@ import ( "sync" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colconv" @@ -34,6 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -115,7 +118,7 @@ type cTableInfo struct { // only be used for unique secondary indexes. extraValDirections []descpb.IndexDescriptor_Direction - da rowenc.DatumAlloc + da tree.DatumAlloc } var _ execinfra.Releasable = &cTableInfo{} @@ -376,10 +379,9 @@ func (rf *cFetcher) Init( table.orderedColIdxMap.vals = make(descpb.ColumnIDs, 0, nCols) table.orderedColIdxMap.ords = make([]int, 0, nCols) } - colDescriptors := tableArgs.cols - for i := range colDescriptors { + for _, col := range tableArgs.cols { //gcassert:bce - id := colDescriptors[i].GetID() + id := col.GetID() table.orderedColIdxMap.vals = append(table.orderedColIdxMap.vals, id) table.orderedColIdxMap.ords = append(table.orderedColIdxMap.ords, tableArgs.ColIdxMap.GetDefault(id)) } @@ -407,7 +409,7 @@ func (rf *cFetcher) Init( nonSystemColOffset = 0 } for idx := nonSystemColOffset; idx < nCols; idx++ { - col := colDescriptors[idx].GetID() + colID := tableArgs.cols[idx].GetID() // Set up extra metadata for system columns, if this is a system // column. // @@ -415,40 +417,41 @@ func (rf *cFetcher) Init( // but we don't want to include them in that set because the // handling of system columns is separate from the standard value // decoding process. 
- switch colinfo.GetSystemColumnKindFromColumnID(col) { - case descpb.SystemColumnKind_MVCCTIMESTAMP: + switch colinfo.GetSystemColumnKindFromColumnID(colID) { + case catpb.SystemColumnKind_MVCCTIMESTAMP: table.timestampOutputIdx = idx rf.mvccDecodeStrategy = row.MVCCDecodingRequired table.neededValueColsByIdx.Remove(idx) - case descpb.SystemColumnKind_TABLEOID: + case catpb.SystemColumnKind_TABLEOID: table.oidOutputIdx = idx table.neededValueColsByIdx.Remove(idx) } } } - table.knownPrefixLength = len(rowenc.MakeIndexKeyPrefix(codec, table.desc, table.index.GetID())) + table.knownPrefixLength = len(rowenc.MakeIndexKeyPrefix(codec, table.desc.GetID(), table.index.GetID())) - var indexColumnIDs []descpb.ColumnID - indexColumnIDs, table.indexColumnDirs = catalog.FullIndexColumnIDs(table.index) + table.indexColumnDirs = table.desc.IndexFullColumnDirections(table.index) + fullColumns := table.desc.IndexFullColumns(table.index) - compositeColumnIDs := util.MakeFastIntSet() - for i := 0; i < table.index.NumCompositeColumns(); i++ { - id := table.index.GetCompositeColumnID(i) - compositeColumnIDs.Add(int(id)) - } + compositeColumnIDs := table.index.CollectCompositeColumnIDs() - nIndexCols := len(indexColumnIDs) + nIndexCols := len(fullColumns) if cap(table.indexColOrdinals) >= nIndexCols { table.indexColOrdinals = table.indexColOrdinals[:nIndexCols] } else { table.indexColOrdinals = make([]int, nIndexCols) } indexColOrdinals := table.indexColOrdinals - _ = indexColOrdinals[len(indexColumnIDs)-1] + _ = indexColOrdinals[len(fullColumns)-1] needToDecodeDecimalKey := false - for i, id := range indexColumnIDs { - colIdx, ok := tableArgs.ColIdxMap.Get(id) + for i, col := range fullColumns { + if col == nil { + //gcassert:bce + indexColOrdinals[i] = -1 + continue + } + colIdx, ok := tableArgs.ColIdxMap.Get(col.GetID()) if ok { //gcassert:bce indexColOrdinals[i] = colIdx @@ -456,7 +459,7 @@ func (rf *cFetcher) Init( needToDecodeDecimalKey = needToDecodeDecimalKey || tableArgs.typs[colIdx].Family() == types.DecimalFamily // A composite column might also have a value encoding which must be // decoded. Others can be removed from neededValueColsByIdx. - if compositeColumnIDs.Contains(int(id)) { + if compositeColumnIDs.Contains(col.GetID()) { table.compositeIndexColOrdinals.Add(colIdx) } else { table.neededValueColsByIdx.Remove(colIdx) @@ -494,7 +497,7 @@ func (rf *cFetcher) Init( id := table.index.GetKeySuffixColumnID(i) colIdx, ok := tableArgs.ColIdxMap.Get(id) if ok { - if compositeColumnIDs.Contains(int(id)) { + if compositeColumnIDs.Contains(id) { table.compositeIndexColOrdinals.Add(colIdx) table.neededValueColsByIdx.Remove(colIdx) } @@ -503,18 +506,14 @@ func (rf *cFetcher) Init( } // Prepare our index key vals slice. - table.keyValTypes = colinfo.GetColumnTypesFromColDescs( - colDescriptors, indexColumnIDs, table.keyValTypes, - ) + table.keyValTypes = getColumnTypesFromCols(fullColumns, table.keyValTypes) if table.index.NumKeySuffixColumns() > 0 { // Unique secondary indexes have a value that is the // primary index key. // Primary indexes only contain ascendingly-encoded // values. If this ever changes, we'll probably have to // figure out the directions here too. 
- table.extraTypes = colinfo.GetColumnTypesFromColDescs( - colDescriptors, table.index.IndexDesc().KeySuffixColumnIDs, table.extraTypes, - ) + table.extraTypes = getColumnTypesFromCols(table.desc.IndexKeySuffixColumns(table.index), table.extraTypes) nExtraColumns := table.index.NumKeySuffixColumns() if cap(table.extraValColOrdinals) >= nExtraColumns { table.extraValColOrdinals = table.extraValColOrdinals[:nExtraColumns] @@ -567,6 +566,30 @@ func (rf *cFetcher) Init( return nil } +func getColumnTypesFromCols(cols []catalog.Column, outTypes []*types.T) []*types.T { + if cap(outTypes) < len(cols) { + outTypes = make([]*types.T, len(cols)) + } else { + outTypes = outTypes[:len(cols)] + } + for i, col := range cols { + if col == nil { + continue + } + outTypes[i] = col.GetType() + } + return outTypes +} + +//gcassert:inline +func (rf *cFetcher) setFetcher(f *row.KVFetcher, limitHint rowinfra.RowLimit) { + rf.fetcher = f + rf.machine.lastRowPrefix = nil + rf.machine.limitHint = int(limitHint) + rf.machine.state[0] = stateResetBatch + rf.machine.state[1] = stateInitFetch +} + // StartScan initializes and starts the key-value scan. Can be used multiple // times. // @@ -634,11 +657,30 @@ func (rf *cFetcher) StartScan( if err != nil { return err } - rf.fetcher = f - rf.machine.lastRowPrefix = nil - rf.machine.limitHint = int(limitHint) - rf.machine.state[0] = stateResetBatch - rf.machine.state[1] = stateInitFetch + rf.setFetcher(f, limitHint) + return nil +} + +// StartScanStreaming initializes and starts the key-value scan using the +// Streamer API. Can be used multiple times. +// +// The fetcher takes ownership of the spans slice - it can modify the slice and +// will perform the memory accounting accordingly. The caller can only reuse the +// spans slice after the fetcher has been closed (which happens when the fetcher +// emits the first zero batch), and if the caller does, it becomes responsible +// for the memory accounting. +func (rf *cFetcher) StartScanStreaming( + ctx context.Context, + streamer *kvstreamer.Streamer, + spans roachpb.Spans, + limitHint rowinfra.RowLimit, +) error { + kvBatchFetcher, err := row.NewTxnKVStreamer(ctx, streamer, spans, rf.lockStrength) + if err != nil { + return err + } + f := row.NewKVStreamingFetcher(kvBatchFetcher) + rf.setFetcher(f, limitHint) return nil } @@ -871,7 +913,7 @@ func (rf *cFetcher) NextBatch(ctx context.Context) (coldata.Batch, error) { for i := 0; i < rf.table.index.NumKeySuffixColumns(); i++ { var err error // Slice off an extra encoded column from remainingBytes. - remainingBytes, err = rowenc.SkipTableKey(remainingBytes) + remainingBytes, err = keyside.Skip(remainingBytes) if err != nil { return nil, err } diff --git a/pkg/sql/colfetcher/cfetcher_setup.go b/pkg/sql/colfetcher/cfetcher_setup.go index d1e59cea6ca8..1986dfa71217 100644 --- a/pkg/sql/colfetcher/cfetcher_setup.go +++ b/pkg/sql/colfetcher/cfetcher_setup.go @@ -85,7 +85,6 @@ func populateTableArgs( table catalog.TableDescriptor, index catalog.Index, invertedCol catalog.Column, - visibility execinfrapb.ScanVisibility, hasSystemColumns bool, post *execinfrapb.PostProcessSpec, helper *colexecargs.ExprHelper, @@ -94,11 +93,7 @@ func populateTableArgs( // First, find all columns present in the table and possibly include the // system columns (when requested). cols := args.cols[:0] - if visibility == execinfra.ScanVisibilityPublicAndNotPublic { - cols = append(cols, table.ReadableColumns()...) - } else { - cols = append(cols, table.PublicColumns()...) 
- } + cols = append(cols, table.ReadableColumns()...) if invertedCol != nil { for i, col := range cols { if col.GetID() == invertedCol.GetID() { @@ -161,7 +156,7 @@ func populateTableArgs( // make sure they are hydrated. In row execution engine it is done during // the processor initialization, but neither ColBatchScan nor cFetcher are // processors, so we need to do the hydration ourselves. - resolver := flowCtx.TypeResolverFactory.NewTypeResolver(flowCtx.Txn) + resolver := flowCtx.NewTypeResolver(flowCtx.Txn) return args, neededColumns, resolver.HydrateTypeSlice(ctx, args.typs) } diff --git a/pkg/sql/colfetcher/colbatch_scan.go b/pkg/sql/colfetcher/colbatch_scan.go index 2988b80b5db8..0f45657941b3 100644 --- a/pkg/sql/colfetcher/colbatch_scan.go +++ b/pkg/sql/colfetcher/colbatch_scan.go @@ -194,21 +194,17 @@ func NewColBatchScan( if nodeID, ok := flowCtx.NodeID.OptionalNodeID(); nodeID == 0 && ok { return nil, errors.Errorf("attempting to create a ColBatchScan with uninitialized NodeID") } - if spec.IsCheck { + if spec.DeprecatedIsCheck { // cFetchers don't support these checks. return nil, errors.AssertionFailedf("attempting to create a cFetcher with the IsCheck flag set") } limitHint := rowinfra.RowLimit(execinfra.LimitHint(spec.LimitHint, post)) - // TODO(ajwerner): The need to construct an immutable here - // indicates that we're probably doing this wrong. Instead we should be - // just setting the ID and Version in the spec or something like that and - // retrieving the hydrated immutable from cache. - table := spec.BuildTableDescriptor() + table := flowCtx.TableDescriptor(&spec.Table) invertedColumn := tabledesc.FindInvertedColumn(table, spec.InvertedColumn) tableArgs, _, err := populateTableArgs( ctx, flowCtx, table, table.ActiveIndexes()[spec.IndexIdx], - invertedColumn, spec.Visibility, spec.HasSystemColumns, post, helper, + invertedColumn, spec.HasSystemColumns, post, helper, ) if err != nil { return nil, err diff --git a/pkg/sql/colfetcher/index_join.go b/pkg/sql/colfetcher/index_join.go index bb2a948b9710..2e17cb0bac19 100644 --- a/pkg/sql/colfetcher/index_join.go +++ b/pkg/sql/colfetcher/index_join.go @@ -12,11 +12,13 @@ package colfetcher import ( "context" + "math" "sort" "time" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/typeconv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecargs" "github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecspan" @@ -26,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/memsize" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -102,6 +105,15 @@ type ColIndexJoin struct { // maintainOrdering is true when the index join is required to maintain its // input ordering, in which case the ordering of the spans cannot be changed. maintainOrdering bool + + // usesStreamer indicates whether the ColIndexJoin is using the Streamer + // API. + usesStreamer bool + streamerInfo struct { + *kvstreamer.Streamer + budgetAcc *mon.BoundAccount + budgetLimit int64 + } } var _ colexecop.KVReader = &ColIndexJoin{} @@ -119,6 +131,21 @@ func (s *ColIndexJoin) Init(ctx context.Context) { // tracing is enabled. 
s.Ctx, s.tracingSpan = execinfra.ProcessorSpan(s.Ctx, "colindexjoin") s.Input.Init(s.Ctx) + if s.usesStreamer { + s.streamerInfo.Streamer = kvstreamer.NewStreamer( + s.flowCtx.Cfg.DistSender, + s.flowCtx.Stopper(), + s.flowCtx.Txn, + s.flowCtx.EvalCtx.Settings, + row.GetWaitPolicy(s.rf.lockWaitPolicy), + s.streamerInfo.budgetLimit, + s.streamerInfo.budgetAcc, + ) + s.streamerInfo.Streamer.Init( + kvstreamer.OutOfOrder, + kvstreamer.Hints{UniqueRequests: true}, + ) + } } type indexJoinState uint8 @@ -174,16 +201,27 @@ func (s *ColIndexJoin) Next() coldata.Batch { // the memory accounting - we don't double count for any memory of // spans because the spanAssembler released all of the relevant // memory from its account in GetSpans(). - if err := s.rf.StartScan( - s.Ctx, - s.flowCtx.Txn, - spans, - nil, /* bsHeader */ - false, /* limitBatches */ - rowinfra.NoBytesLimit, - rowinfra.NoRowLimit, - s.flowCtx.EvalCtx.TestingKnobs.ForceProductionBatchSizes, - ); err != nil { + var err error + if s.usesStreamer { + err = s.rf.StartScanStreaming( + s.Ctx, + s.streamerInfo.Streamer, + spans, + rowinfra.NoRowLimit, + ) + } else { + err = s.rf.StartScan( + s.Ctx, + s.flowCtx.Txn, + spans, + nil, /* bsHeader */ + false, /* limitBatches */ + rowinfra.NoBytesLimit, + rowinfra.NoRowLimit, + s.flowCtx.EvalCtx.TestingKnobs.ForceProductionBatchSizes, + ) + } + if err != nil { colexecerror.InternalError(err) } s.state = indexJoinScanning @@ -280,7 +318,7 @@ func (s *ColIndexJoin) getRowSize(idx int) int64 { rowSize += adjustMemEstimate(s.mem.byteLikeCols[i].ElemSize(idx)) } for i := range s.mem.decimalCols { - rowSize += adjustMemEstimate(int64(tree.SizeOfDecimal(&s.mem.decimalCols[i][idx]))) + rowSize += adjustMemEstimate(int64(s.mem.decimalCols[i][idx].Size())) } for i := range s.mem.datumCols { memEstimate := int64(s.mem.datumCols[i].Get(idx).(tree.Datum).Size()) + memsize.DatumOverhead @@ -385,6 +423,7 @@ func NewColIndexJoin( allocator *colmem.Allocator, fetcherAllocator *colmem.Allocator, kvFetcherMemAcc *mon.BoundAccount, + streamerBudgetAcc *mon.BoundAccount, flowCtx *execinfra.FlowCtx, helper *colexecargs.ExprHelper, input colexecop.Operator, @@ -406,33 +445,54 @@ func NewColIndexJoin( return nil, errors.AssertionFailedf("non-empty ON expressions are not supported for index joins") } - // TODO(ajwerner): The need to construct an immutable here - // indicates that we're probably doing this wrong. Instead we should be - // just setting the ID and Version in the spec or something like that and - // retrieving the hydrated immutable from cache. - table := spec.BuildTableDescriptor() + table := flowCtx.TableDescriptor(&spec.Table) index := table.ActiveIndexes()[spec.IndexIdx] tableArgs, neededColumns, err := populateTableArgs( ctx, flowCtx, table, index, nil, /* invertedCol */ - spec.Visibility, spec.HasSystemColumns, post, helper, + spec.HasSystemColumns, post, helper, ) if err != nil { return nil, err } + memoryLimit := execinfra.GetWorkMemLimit(flowCtx) + + useStreamer := row.CanUseStreamer(ctx, flowCtx.EvalCtx.Settings) && !spec.MaintainOrdering + if useStreamer { + // TODO(yuzefovich): remove this conditional once multiple column + // families are supported. + if maxKeysPerRow, err := tableArgs.desc.KeysPerRow(tableArgs.index.GetID()); err != nil { + return nil, err + } else if maxKeysPerRow > 1 { + // Currently, the streamer only supports cases with a single column + // family. 
+ useStreamer = false + } else { + if streamerBudgetAcc == nil { + return nil, errors.AssertionFailedf("streamer budget account is nil when the Streamer API is desired") + } + // Keep the quarter of the memory limit for the output batch of the + // cFetcher, and we'll give the remaining three quarters to the + // streamer budget below. + memoryLimit = int64(math.Ceil(float64(memoryLimit) / 4.0)) + } + } + fetcher := cFetcherPool.Get().(*cFetcher) fetcher.cFetcherArgs = cFetcherArgs{ spec.LockingStrength, spec.LockingWaitPolicy, flowCtx.EvalCtx.SessionData().LockTimeout, - execinfra.GetWorkMemLimit(flowCtx), + memoryLimit, // Note that the correct estimated row count will be set by the index // joiner for each set of spans to read. 0, /* estimatedRowCount */ false, /* reverse */ flowCtx.TraceKV, } - if err = fetcher.Init(flowCtx.Codec(), fetcherAllocator, kvFetcherMemAcc, tableArgs, spec.HasSystemColumns); err != nil { + if err = fetcher.Init( + flowCtx.Codec(), fetcherAllocator, kvFetcherMemAcc, tableArgs, spec.HasSystemColumns, + ); err != nil { fetcher.Release() return nil, err } @@ -448,8 +508,13 @@ func NewColIndexJoin( spanAssembler: spanAssembler, ResultTypes: tableArgs.typs, maintainOrdering: spec.MaintainOrdering, + usesStreamer: useStreamer, } op.prepareMemLimit(inputTypes) + if useStreamer { + op.streamerInfo.budgetLimit = 3 * memoryLimit + op.streamerInfo.budgetAcc = streamerBudgetAcc + } return op, nil } @@ -532,5 +597,8 @@ func (s *ColIndexJoin) closeInternal() { // spanAssembler can be nil if Release() has already been called. s.spanAssembler.Close() } + if s.streamerInfo.Streamer != nil { + s.streamerInfo.Streamer.Close() + } s.batch = nil } diff --git a/pkg/sql/colflow/BUILD.bazel b/pkg/sql/colflow/BUILD.bazel index 68d931945542..adb5f63d4959 100644 --- a/pkg/sql/colflow/BUILD.bazel +++ b/pkg/sql/colflow/BUILD.bazel @@ -72,6 +72,7 @@ go_test( "vectorized_panic_propagation_test.go", ], embed = [":colflow"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/col/coldata", diff --git a/pkg/sql/colflow/explain_vec.go b/pkg/sql/colflow/explain_vec.go index a1711cf95ad3..b8c461663576 100644 --- a/pkg/sql/colflow/explain_vec.go +++ b/pkg/sql/colflow/explain_vec.go @@ -57,7 +57,7 @@ func convertToVecTree( creator := newVectorizedFlowCreator( newNoopFlowCreatorHelper(), vectorizedRemoteComponentCreator{}, false, false, nil, &execinfra.RowChannel{}, &fakeBatchReceiver{}, flowCtx.Cfg.NodeDialer, execinfrapb.FlowID{}, colcontainer.DiskQueueCfg{}, - flowCtx.Cfg.VecFDSemaphore, flowCtx.TypeResolverFactory.NewTypeResolver(flowCtx.EvalCtx.Txn), + flowCtx.Cfg.VecFDSemaphore, flowCtx.NewTypeResolver(flowCtx.EvalCtx.Txn), admission.WorkInfo{}, ) // We create an unlimited memory account because we're interested whether the diff --git a/pkg/sql/colflow/vectorized_flow.go b/pkg/sql/colflow/vectorized_flow.go index fdb4f4137bf9..abdd4c060c37 100644 --- a/pkg/sql/colflow/vectorized_flow.go +++ b/pkg/sql/colflow/vectorized_flow.go @@ -209,7 +209,7 @@ func (f *vectorizedFlow) Setup( f.GetID(), diskQueueCfg, f.countingSemaphore, - flowCtx.TypeResolverFactory.NewTypeResolver(flowCtx.EvalCtx.Txn), + flowCtx.NewTypeResolver(flowCtx.EvalCtx.Txn), f.FlowBase.GetAdmissionInfo(), ) if f.testingKnobs.onSetupFlow != nil { @@ -993,6 +993,7 @@ func (s *vectorizedFlowCreator) setupOutput( ) // The flow coordinator is a root of its operator chain. 
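As a compact illustration of the index-join Streamer setup above (the helper name and the 64MiB figure below are assumptions, not code from the patch): NewColIndexJoin only takes the Streamer path when the cluster allows it, ordering need not be maintained, and the table has a single column family, and it then keeps a quarter of the workmem limit for the cFetcher's output batch while handing the remaining three quarters to the Streamer budget.

```
package main

import (
	"fmt"
	"math"
)

// shouldUseStreamer mirrors the conditions above: the Streamer path requires
// cluster support, no ordering requirement, and at most one KV pair per table
// row (i.e. a single column family).
func shouldUseStreamer(canUseStreamer, maintainOrdering bool, maxKeysPerRow int) bool {
	if !canUseStreamer || maintainOrdering {
		return false
	}
	return maxKeysPerRow <= 1
}

func main() {
	fmt.Println(shouldUseStreamer(true, false, 1)) // true
	fmt.Println(shouldUseStreamer(true, true, 1))  // false: must keep input order

	// The workmem split: a quarter stays with the cFetcher's output batch,
	// the remaining three quarters become the Streamer budget.
	workmem := int64(64 << 20) // e.g. a 64MiB limit
	fetcherLimit := int64(math.Ceil(float64(workmem) / 4.0))
	streamerBudget := 3 * fetcherLimit
	fmt.Printf("fetcher=%dMiB streamer=%dMiB\n", fetcherLimit>>20, streamerBudget>>20) // fetcher=16MiB streamer=48MiB
}
```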
s.opChains = append(s.opChains, s.batchFlowCoordinator) + s.releasables = append(s.releasables, s.batchFlowCoordinator) } else { // We need to use the row receiving output. if input != nil { @@ -1021,6 +1022,9 @@ func (s *vectorizedFlowCreator) setupOutput( ) // The flow coordinator is a root of its operator chain. s.opChains = append(s.opChains, f) + // NOTE: we don't append f to s.releasables because addFlowCoordinator + // adds the FlowCoordinator to FlowBase.processors, which ensures that + // it is later released in FlowBase.Cleanup. s.addFlowCoordinator(f) } @@ -1108,7 +1112,7 @@ func (s *vectorizedFlowCreator) setupFlow( } numOldMonitors := len(s.monitorRegistry.GetMonitors()) if args.ExprHelper.SemaCtx == nil { - args.ExprHelper.SemaCtx = flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + args.ExprHelper.SemaCtx = flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) } var result *colexecargs.NewColOperatorResult result, err = colbuilder.NewColOperator(ctx, flowCtx, args) diff --git a/pkg/sql/colmem/allocator.go b/pkg/sql/colmem/allocator.go index 747ea4f26ca6..8af904f12ec3 100644 --- a/pkg/sql/colmem/allocator.go +++ b/pkg/sql/colmem/allocator.go @@ -384,7 +384,7 @@ func sizeOfDecimals(decimals coldata.Decimals, startIdx int) int64 { // Account for the allocated memory beyond the length of the slice. size := int64(cap(decimals)-len(decimals)) * memsize.Decimal for i := startIdx; i < decimals.Len(); i++ { - size += int64(tree.SizeOfDecimal(&decimals[i])) + size += int64(decimals[i].Size()) } return size } @@ -393,10 +393,6 @@ func sizeOfDecimals(decimals coldata.Decimals, startIdx int) int64 { // coldata.BatchSize() length. var SizeOfBatchSizeSelVector = int64(coldata.BatchSize()) * memsize.Int -// decimalEstimate is our guess for how much space a single apd.Decimal element -// will take up. -const decimalEstimate = 50 - // EstimateBatchSizeBytes returns an estimated amount of bytes needed to // store a batch in memory that has column types vecTypes. // WARNING: This only is correct for fixed width types, and returns an @@ -423,8 +419,12 @@ func EstimateBatchSizeBytes(vecTypes []*types.T, batchLength int) int64 { } case types.DecimalFamily: // Similar to byte arrays, we can't tell how much space is used - // to hold the arbitrary precision decimal objects. - acc += decimalEstimate + // to hold the arbitrary precision decimal objects because they + // can contain a variable-length portion. However, most values + // (those with a coefficient which can fit in a uint128) do not + // contain any indirection and are stored entirely inline, so we + // use the flat struct size as an estimate. 
+ acc += memsize.Decimal case types.JsonFamily: numBytesVectors++ case typeconv.DatumVecCanonicalTypeFamily: @@ -559,7 +559,7 @@ func (h *SetAccountingHelper) Init(allocator *Allocator, typs []*types.T) { h.bytesLikeVecIdxs.Add(vecIdx) case types.DecimalFamily: h.varSizeVecIdxs.Add(vecIdx) - h.varSizeEstimatePerRow += decimalEstimate + h.varSizeEstimatePerRow += memsize.Decimal numDecimalVecs++ case typeconv.DatumVecCanonicalTypeFamily: estimate, isVarlen := tree.DatumTypeSize(typ) @@ -659,7 +659,7 @@ func (h *SetAccountingHelper) AccountForSet(rowIdx int) { var newVarLengthDatumSize int64 for _, decimalVec := range h.decimalVecs { d := decimalVec.Get(rowIdx) - newVarLengthDatumSize += int64(tree.SizeOfDecimal(&d)) + newVarLengthDatumSize += int64(d.Size()) } for _, datumVec := range h.datumVecs { datumSize := datumVec.Get(rowIdx).(tree.Datum).Size() diff --git a/pkg/sql/comment_on_constraint.go b/pkg/sql/comment_on_constraint.go index 2feea4f12e21..da8e8d34abe8 100644 --- a/pkg/sql/comment_on_constraint.go +++ b/pkg/sql/comment_on_constraint.go @@ -14,14 +14,13 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) @@ -29,6 +28,7 @@ type commentOnConstraintNode struct { n *tree.CommentOnConstraint tableDesc catalog.TableDescriptor oid *tree.DOid + commenter scexec.CommentUpdater } //CommentOnConstraint add comment on a constraint @@ -52,7 +52,15 @@ func (p *planner) CommentOnConstraint( return nil, err } - return &commentOnConstraintNode{n: n, tableDesc: tableDesc}, nil + return &commentOnConstraintNode{ + n: n, + tableDesc: tableDesc, + commenter: p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + ), + }, nil } @@ -94,28 +102,20 @@ func (n *commentOnConstraintNode) startExec(params runParams) error { // Setting the comment to NULL is the // equivalent of deleting the comment. 
if n.n.Comment != nil { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "set-constraint-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", + err := n.commenter.UpsertDescriptorComment( + int64(n.oid.DInt), + 0, keys.ConstraintCommentType, - n.oid.DInt, *n.n.Comment, ) if err != nil { return err } } else { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "delete-constraint-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0", + err := n.commenter.DeleteDescriptorComment( + int64(n.oid.DInt), + 0, keys.ConstraintCommentType, - n.oid.DInt, ) if err != nil { return err diff --git a/pkg/sql/comment_on_database.go b/pkg/sql/comment_on_database.go index 7f9052fbe093..be3b4f7e55b9 100644 --- a/pkg/sql/comment_on_database.go +++ b/pkg/sql/comment_on_database.go @@ -14,17 +14,17 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) type commentOnDatabaseNode struct { - n *tree.CommentOnDatabase - dbDesc catalog.DatabaseDescriptor + n *tree.CommentOnDatabase + dbDesc catalog.DatabaseDescriptor + commenter scexec.CommentUpdater } // CommentOnDatabase add comment on a database. 
@@ -50,46 +50,40 @@ func (p *planner) CommentOnDatabase( return nil, err } - return &commentOnDatabaseNode{n: n, dbDesc: dbDesc}, nil + return &commentOnDatabaseNode{n: n, + dbDesc: dbDesc, + commenter: p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + ), + }, nil } func (n *commentOnDatabaseNode) startExec(params runParams) error { if n.n.Comment != nil { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "set-db-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", - keys.DatabaseCommentType, - n.dbDesc.GetID(), - *n.n.Comment) + err := n.commenter.UpsertDescriptorComment( + int64(n.dbDesc.GetID()), 0, keys.DatabaseCommentType, *n.n.Comment) if err != nil { return err } } else { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "delete-db-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0", - keys.DatabaseCommentType, - n.dbDesc.GetID()) + err := n.commenter.DeleteDescriptorComment( + int64(n.dbDesc.GetID()), 0, keys.DatabaseCommentType) if err != nil { return err } } - comment := "" + dbComment := "" if n.n.Comment != nil { - comment = *n.n.Comment + dbComment = *n.n.Comment } return params.p.logEvent(params.ctx, n.dbDesc.GetID(), &eventpb.CommentOnDatabase{ DatabaseName: n.n.Name.String(), - Comment: comment, + Comment: dbComment, NullComment: n.n.Comment == nil, }) } diff --git a/pkg/sql/comment_on_index.go b/pkg/sql/comment_on_index.go index 82d5521d5373..b7452e08e2d8 100644 --- a/pkg/sql/comment_on_index.go +++ b/pkg/sql/comment_on_index.go @@ -14,13 +14,11 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) @@ -28,6 +26,7 @@ type commentOnIndexNode struct { n *tree.CommentOnIndex tableDesc *tabledesc.Mutable index catalog.Index + commenter scexec.CommentUpdater } // CommentOnIndex adds a comment on an index. 
@@ -46,21 +45,31 @@ func (p *planner) CommentOnIndex(ctx context.Context, n *tree.CommentOnIndex) (p return nil, err } - return &commentOnIndexNode{n: n, tableDesc: tableDesc, index: index}, nil + return &commentOnIndexNode{ + n: n, + tableDesc: tableDesc, + index: index, + commenter: p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + )}, nil } func (n *commentOnIndexNode) startExec(params runParams) error { if n.n.Comment != nil { - err := params.p.upsertIndexComment( - params.ctx, - n.tableDesc.ID, - n.index.GetID(), - *n.n.Comment) + err := n.commenter.UpsertDescriptorComment( + int64(n.tableDesc.ID), + int64(n.index.GetID()), + keys.IndexCommentType, + *n.n.Comment, + ) if err != nil { return err } } else { - err := params.p.removeIndexComment(params.ctx, n.tableDesc.ID, n.index.GetID()) + err := n.commenter.DeleteDescriptorComment( + int64(n.tableDesc.ID), int64(n.index.GetID()), keys.IndexCommentType) if err != nil { return err } @@ -86,39 +95,6 @@ func (n *commentOnIndexNode) startExec(params runParams) error { }) } -func (p *planner) upsertIndexComment( - ctx context.Context, tableID descpb.ID, indexID descpb.IndexID, comment string, -) error { - _, err := p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - ctx, - "set-index-comment", - p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "UPSERT INTO system.comments VALUES ($1, $2, $3, $4)", - keys.IndexCommentType, - tableID, - indexID, - comment) - - return err -} - -func (p *planner) removeIndexComment( - ctx context.Context, tableID descpb.ID, indexID descpb.IndexID, -) error { - _, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.ExecEx( - ctx, - "delete-index-comment", - p.txn, - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=$3", - keys.IndexCommentType, - tableID, - indexID) - - return err -} - func (n *commentOnIndexNode) Next(runParams) (bool, error) { return false, nil } func (n *commentOnIndexNode) Values() tree.Datums { return tree.Datums{} } func (n *commentOnIndexNode) Close(context.Context) {} diff --git a/pkg/sql/comment_on_schema.go b/pkg/sql/comment_on_schema.go index 9e2095dd5383..30c677710f3f 100644 --- a/pkg/sql/comment_on_schema.go +++ b/pkg/sql/comment_on_schema.go @@ -14,18 +14,18 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" ) type commentOnSchemaNode struct { n *tree.CommentOnSchema schemaDesc catalog.SchemaDescriptor + commenter scexec.CommentUpdater } // CommentOnSchema add comment on a schema. 
@@ -63,25 +63,27 @@ func (p *planner) CommentOnSchema(ctx context.Context, n *tree.CommentOnSchema) return nil, err } - return &commentOnSchemaNode{n: n, schemaDesc: schemaDesc}, nil + return &commentOnSchemaNode{ + n: n, + schemaDesc: schemaDesc, + commenter: p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + ), + }, nil } func (n *commentOnSchemaNode) startExec(params runParams) error { if n.n.Comment != nil { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "set-schema-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", - keys.SchemaCommentType, - n.schemaDesc.GetID(), - *n.n.Comment) + err := n.commenter.UpsertDescriptorComment( + int64(n.schemaDesc.GetID()), 0, keys.SchemaCommentType, *n.n.Comment) if err != nil { return err } } else { - err := params.p.removeSchemaComment(params.ctx, n.schemaDesc.GetID()) + err := n.commenter.DeleteDescriptorComment( + int64(n.schemaDesc.GetID()), 0, keys.SchemaCommentType) if err != nil { return err } diff --git a/pkg/sql/comment_on_table.go b/pkg/sql/comment_on_table.go index 421272b8dd27..ea8924480100 100644 --- a/pkg/sql/comment_on_table.go +++ b/pkg/sql/comment_on_table.go @@ -14,17 +14,17 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) type commentOnTableNode struct { n *tree.CommentOnTable tableDesc catalog.TableDescriptor + commenter scexec.CommentUpdater } // CommentOnTable add comment on a table. 
@@ -49,32 +49,27 @@ func (p *planner) CommentOnTable(ctx context.Context, n *tree.CommentOnTable) (p return nil, err } - return &commentOnTableNode{n: n, tableDesc: tableDesc}, nil + return &commentOnTableNode{ + n: n, + tableDesc: tableDesc, + commenter: p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + ), + }, nil } func (n *commentOnTableNode) startExec(params runParams) error { if n.n.Comment != nil { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "set-table-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "UPSERT INTO system.comments VALUES ($1, $2, 0, $3)", - keys.TableCommentType, - n.tableDesc.GetID(), - *n.n.Comment) + err := n.commenter.UpsertDescriptorComment( + int64(n.tableDesc.GetID()), 0, keys.TableCommentType, *n.n.Comment) if err != nil { return err } } else { - _, err := params.p.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( - params.ctx, - "delete-table-comment", - params.p.Txn(), - sessiondata.InternalExecutorOverride{User: security.RootUserName()}, - "DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0", - keys.TableCommentType, - n.tableDesc.GetID()) + err := n.commenter.DeleteDescriptorComment( + int64(n.tableDesc.GetID()), 0, keys.TableCommentType) if err != nil { return err } diff --git a/pkg/sql/commenter/BUILD.bazel b/pkg/sql/commenter/BUILD.bazel new file mode 100644 index 000000000000..ac8e1933269b --- /dev/null +++ b/pkg/sql/commenter/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "commenter", + srcs = [ + "comment_updater.go", + "comment_updater_factory.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/commenter", + visibility = ["//visibility:public"], + deps = [ + "//pkg/keys", + "//pkg/kv", + "//pkg/security", + "//pkg/sql/catalog", + "//pkg/sql/catalog/descpb", + "//pkg/sql/schemachanger/scexec", + "//pkg/sql/schemachanger/scpb", + "//pkg/sql/sem/tree", + "//pkg/sql/sessiondata", + "//pkg/sql/sqlutil", + ], +) diff --git a/pkg/sql/commenter/comment_updater.go b/pkg/sql/commenter/comment_updater.go new file mode 100644 index 000000000000..a4b8ee4e5553 --- /dev/null +++ b/pkg/sql/commenter/comment_updater.go @@ -0,0 +1,178 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package commenter + +import ( + "context" + "fmt" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" +) + +// ConstraintOidBuilder constructs an OID based on constraint information. +type ConstraintOidBuilder interface { + // ForeignKeyConstraintOid generates a foreign key OID. 
+	ForeignKeyConstraintOid(
+		dbID descpb.ID, scName string, tableID descpb.ID, fk *descpb.ForeignKeyConstraint,
+	) *tree.DOid
+	// UniqueWithoutIndexConstraintOid generates a unique without index constraint OID.
+	UniqueWithoutIndexConstraintOid(
+		dbID descpb.ID, scName string, tableID descpb.ID, uc *descpb.UniqueWithoutIndexConstraint,
+	) *tree.DOid
+	// UniqueConstraintOid generates a unique with index constraint OID.
+	UniqueConstraintOid(
+		dbID descpb.ID, scName string, tableID descpb.ID, indexID descpb.IndexID,
+	) *tree.DOid
+	// PrimaryKeyConstraintOid generates a primary key constraint OID.
+	PrimaryKeyConstraintOid(
+		dbID descpb.ID, scName string, tableID descpb.ID, pkey *descpb.IndexDescriptor,
+	) *tree.DOid
+	// CheckConstraintOid generates a check constraint OID.
+	CheckConstraintOid(
+		dbID descpb.ID, scName string, tableID descpb.ID, check *descpb.TableDescriptor_CheckConstraint,
+	) *tree.DOid
+}
+
+// commentUpdater implements scexec.CommentUpdater and is used to update
+// comments on different schema objects.
+type commentUpdater struct {
+	txn        *kv.Txn
+	ie         sqlutil.InternalExecutor
+	oidBuilder ConstraintOidBuilder
+}
+
+// UpsertDescriptorComment implements scexec.CommentUpdater.
+func (cu commentUpdater) UpsertDescriptorComment(
+	id int64, subID int64, commentType keys.CommentType, comment string,
+) error {
+	_, err := cu.ie.ExecEx(context.Background(),
+		fmt.Sprintf("upsert-%s-comment", commentType),
+		cu.txn,
+		sessiondata.InternalExecutorOverride{User: security.RootUserName()},
+		"UPSERT INTO system.comments VALUES ($1, $2, $3, $4)",
+		commentType,
+		id,
+		subID,
+		comment,
+	)
+	return err
+}
+
+// DeleteDescriptorComment implements scexec.CommentUpdater.
+func (cu commentUpdater) DeleteDescriptorComment(
+	id int64, subID int64, commentType keys.CommentType,
+) error {
+	_, err := cu.ie.ExecEx(context.Background(),
+		fmt.Sprintf("delete-%s-comment", commentType),
+		cu.txn,
+		sessiondata.InternalExecutorOverride{User: security.RootUserName()},
+		"DELETE FROM system.comments WHERE object_id = $1 AND sub_id = $2 AND "+
+			"type = $3;",
+		id,
+		subID,
+		commentType,
+	)
+	return err
+}
+
+func (cu commentUpdater) oidFromConstraint(
+	desc catalog.TableDescriptor,
+	schemaName string,
+	constraintName string,
+	constraintType scpb.ConstraintType,
+) *tree.DOid {
+	switch constraintType {
+	case scpb.ConstraintType_FK:
+		for _, fk := range desc.AllActiveAndInactiveForeignKeys() {
+			if fk.Name == constraintName {
+				return cu.oidBuilder.ForeignKeyConstraintOid(
+					desc.GetParentID(),
+					schemaName,
+					desc.GetID(),
+					fk,
+				)
+			}
+		}
+	case scpb.ConstraintType_PrimaryKey:
+		for _, idx := range desc.AllIndexes() {
+			if idx.GetName() == constraintName {
+				return cu.oidBuilder.UniqueConstraintOid(
+					desc.GetParentID(),
+					schemaName,
+					desc.GetID(),
+					idx.GetID(),
+				)
+			}
+		}
+	case scpb.ConstraintType_UniqueWithoutIndex:
+		for _, unique := range desc.GetUniqueWithoutIndexConstraints() {
+			if unique.GetName() == constraintName {
+				return cu.oidBuilder.UniqueWithoutIndexConstraintOid(
+					desc.GetParentID(),
+					schemaName,
+					desc.GetID(),
+					&unique,
+				)
+			}
+		}
+	case scpb.ConstraintType_Check:
+		for _, check := range desc.GetChecks() {
+			if check.Name == constraintName {
+				return cu.oidBuilder.CheckConstraintOid(
+					desc.GetParentID(),
+					schemaName,
+					desc.GetID(),
+					check,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+// UpsertConstraintComment implements scexec.CommentUpdater.
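For reference, the updater above addresses rows in system.comments by the triple (type, object_id, sub_id): object-level comments (tables, databases, schemas, constraints) pass sub_id 0, while index comments pass the index ID as sub_id, which is how the rewritten comment_on_*.go call sites earlier in this diff invoke it. A small sketch of that keying (the constants are illustrative stand-ins, not the real keys package values):

```
package main

import "fmt"

// commentKey mirrors how the updater addresses rows in system.comments.
type commentKey struct {
	Type     int
	ObjectID int64
	SubID    int64
}

const (
	tableCommentType = 1 // stand-in values, not the real keys.*CommentType constants
	indexCommentType = 2
)

func tableComment(tableID int64) commentKey {
	return commentKey{Type: tableCommentType, ObjectID: tableID, SubID: 0}
}

func indexComment(tableID, indexID int64) commentKey {
	return commentKey{Type: indexCommentType, ObjectID: tableID, SubID: indexID}
}

func main() {
	fmt.Println(tableComment(52))    // {1 52 0}
	fmt.Println(indexComment(52, 3)) // {2 52 3}
}
```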
+func (cu commentUpdater) UpsertConstraintComment( + desc catalog.TableDescriptor, + schemaName string, + constraintName string, + constraintType scpb.ConstraintType, + comment string, +) error { + oid := cu.oidFromConstraint(desc, schemaName, constraintName, constraintType) + // Constraint was not found. + if oid == nil { + return nil + } + return cu.UpsertDescriptorComment(int64(oid.DInt), 0, keys.ConstraintCommentType, comment) +} + +// DeleteConstraintComment implements scexec.CommentUpdater. +func (cu commentUpdater) DeleteConstraintComment( + desc catalog.TableDescriptor, + schemaName string, + constraintName string, + constraintType scpb.ConstraintType, +) error { + oid := cu.oidFromConstraint(desc, schemaName, constraintName, constraintType) + // Constraint was not found. + if oid == nil { + return nil + } + return cu.DeleteDescriptorComment(int64(oid.DInt), 0, keys.ConstraintCommentType) +} diff --git a/pkg/sql/commenter/comment_updater_factory.go b/pkg/sql/commenter/comment_updater_factory.go new file mode 100644 index 000000000000..3be31c8e752d --- /dev/null +++ b/pkg/sql/commenter/comment_updater_factory.go @@ -0,0 +1,53 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package commenter + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" +) + +// MakeConstraintOidBuilderFn creates a ConstraintOidBuilder. +type MakeConstraintOidBuilderFn func() ConstraintOidBuilder + +// CommentUpdaterFactory used to construct a commenter.CommentUpdater, which +// can be used to update comments on schema objects. +type CommentUpdaterFactory struct { + ieFactory sqlutil.SessionBoundInternalExecutorFactory + makeConstraintOidBuilder MakeConstraintOidBuilderFn +} + +// NewCommentUpdaterFactory creates a new comment updater factory. +func NewCommentUpdaterFactory( + ieFactory sqlutil.SessionBoundInternalExecutorFactory, + makeConstraintOidBuilder MakeConstraintOidBuilderFn, +) scexec.CommentUpdaterFactory { + return CommentUpdaterFactory{ + ieFactory: ieFactory, + makeConstraintOidBuilder: makeConstraintOidBuilder, + } +} + +// NewCommentUpdater creates a new comment updater, which can be used to +// create / destroy comments associated with different schema objects. 
+func (cf CommentUpdaterFactory) NewCommentUpdater( + ctx context.Context, txn *kv.Txn, sessionData *sessiondata.SessionData, +) scexec.CommentUpdater { + return commentUpdater{ + txn: txn, + ie: cf.ieFactory(ctx, sessionData), + oidBuilder: cf.makeConstraintOidBuilder(), + } +} diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 274ad8b90781..22ddd4d0757f 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -37,7 +37,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/idxusage" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -804,7 +803,7 @@ func (s *Server) newConnExecutor( execTestingKnobs: s.GetExecutorConfig().TestingKnobs, }, memMetrics: memMetrics, - planner: planner{execCfg: s.cfg, alloc: &rowenc.DatumAlloc{}}, + planner: planner{execCfg: s.cfg, alloc: &tree.DatumAlloc{}}, // ctxHolder will be reset at the start of run(). We only define // it here so that an early call to close() doesn't panic. @@ -2483,6 +2482,7 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo Planner: p, PrivilegedAccessor: p, SessionAccessor: p, + JobExecContext: p, ClientNoticeSender: p, Sequence: p, Tenant: p, @@ -2948,6 +2948,7 @@ func (ex *connExecutor) notifyStatsRefresherOfNewTables(ctx context.Context) { func (ex *connExecutor) runPreCommitStages(ctx context.Context) error { scs := &ex.extraTxnState.schemaChangerState deps := newSchemaChangerTxnRunDependencies( + ex.planner.SessionData(), ex.planner.User(), ex.server.cfg, ex.planner.txn, diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index ea4297c5fc8b..64cc1d999b99 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -262,22 +262,42 @@ func (ex *connExecutor) execStmtInOpenState( ast := parserStmt.AST ctx = withStatement(ctx, ast) + makeErrEvent := func(err error) (fsm.Event, fsm.EventPayload, error) { + ev, payload := ex.makeErrEvent(err, ast) + return ev, payload, nil + } + var stmt Statement queryID := ex.generateID() // Update the deadline on the transaction based on the collections. err := ex.extraTxnState.descCollection.MaybeUpdateDeadline(ctx, ex.state.mu.txn) if err != nil { - ev, pl := ex.makeErrEvent(err, ast) - return ev, pl, nil + return makeErrEvent(err) } + os := ex.machine.CurState().(stateOpen) + isNextCmdSync := false isExtendedProtocol := prepared != nil if isExtendedProtocol { stmt = makeStatementFromPrepared(prepared, queryID) + // Only check for Sync in the extended protocol. In the simple protocol, + // Sync is meaningless, so it's OK to let isNextCmdSync default to false. + isNextCmdSync, err = ex.stmtBuf.isNextCmdSync() + if err != nil { + return makeErrEvent(err) + } } else { stmt = makeStatement(parserStmt, queryID) } + // In some cases, we need to turn off autocommit behavior here. The postgres + // docs say that commands in the extended protocol are all treated as an + // implicit transaction that does not get committed until a Sync message is + // received. However, if we are executing a statement that is immediately + // followed by Sync (which is the common case), then we still can auto-commit, + // which allows the "insert fast path" (1PC optimization) to be used. 
+ canAutoCommit := os.ImplicitTxn.Get() && (!isExtendedProtocol || isNextCmdSync) + ex.incrementStartedStmtCounter(ast) defer func() { if retErr == nil && !payloadHasError(retPayload) { @@ -289,8 +309,6 @@ func (ex *connExecutor) execStmtInOpenState( ex.state.mu.stmtCount++ ex.state.mu.Unlock() - os := ex.machine.CurState().(stateOpen) - var timeoutTicker *time.Timer queryTimedOut := false // doneAfterFunc will be allocated only when timeoutTicker is non-nil. @@ -355,11 +373,6 @@ func (ex *connExecutor) execStmtInOpenState( } }(ctx, res) - makeErrEvent := func(err error) (fsm.Event, fsm.EventPayload, error) { - ev, payload := ex.makeErrEvent(err, ast) - return ev, payload, nil - } - p := &ex.planner stmtTS := ex.server.cfg.Clock.PhysicalTime() ex.statsCollector.Reset(ex.applicationStats, ex.phaseTimes) @@ -497,12 +510,7 @@ func (ex *connExecutor) execStmtInOpenState( if retEv != nil || retErr != nil { return } - // The postgres docs say that commands in the extended protocol are - // all treated as an implicit transaction that does not get committed - // until a Sync message is received. The prepared statement will only be - // nil if we are in the simple protocol; for the extended protocol the - // commit occurs when Sync is received. - if os.ImplicitTxn.Get() && !isExtendedProtocol { + if canAutoCommit { retEv, retPayload = ex.handleAutoCommit(ctx, ast) return } @@ -701,11 +709,7 @@ func (ex *connExecutor) execStmtInOpenState( p.stmt = stmt p.cancelChecker.Reset(ctx) - // We need to turn off autocommit behavior here so that the "insert fast path" - // does not get triggered. The postgres docs say that commands in the extended - // protocol are all treated as an implicit transaction that does not get - // committed until a Sync message is received. - p.autoCommit = os.ImplicitTxn.Get() && !isExtendedProtocol && !ex.server.cfg.TestingKnobs.DisableAutoCommit + p.autoCommit = canAutoCommit && !ex.server.cfg.TestingKnobs.DisableAutoCommit var stmtThresholdSpan *tracing.Span alreadyRecording := ex.transitionCtx.sessionTracing.Enabled() diff --git a/pkg/sql/conn_io.go b/pkg/sql/conn_io.go index 18a77307d74a..4c9a6fbf5237 100644 --- a/pkg/sql/conn_io.go +++ b/pkg/sql/conn_io.go @@ -442,6 +442,36 @@ func (buf *StmtBuf) CurCmd() (Command, CmdPos, error) { } } +// isNextCmdSync peeks at the next command, and returns true if it is a Sync +// message. If there is not another command in the buffer already, then false +// is returned. The position is reset before returning, so this does not +// affect the curPos. +// +// If the buffer has previously been Close()d, or is closed while this is +// blocked, io.EOF is returned. +func (buf *StmtBuf) isNextCmdSync() (bool, error) { + buf.mu.Lock() + prev := buf.mu.curPos + buf.mu.curPos++ + defer func() { + buf.mu.curPos = prev + buf.mu.Unlock() + }() + if buf.mu.closed { + return false, io.EOF + } + curPos := buf.mu.curPos + cmdIdx, err := buf.translatePosLocked(curPos) + if err != nil { + return false, err + } + if cmdIdx < buf.mu.data.Len() { + _, isSync := buf.mu.data.Get(cmdIdx).(Sync) + return isSync, nil + } + return false, nil +} + // translatePosLocked translates an absolute position of a command (counting // from the connection start) to the index of the respective command in the // buffer (so, it returns an index relative to the start of the buffer). 
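The conn_executor_exec.go and conn_io.go hunks above describe the same idea from both sides: in the extended protocol every statement runs in an implicit transaction that only commits at Sync, but when the statement is immediately followed by Sync the executor can still auto-commit and keep the 1PC fast path. The sketch below is a standalone illustration of that peek-without-consuming pattern, not the CockroachDB implementation; the `stmtBuf`, `cmd`, and `canAutoCommit` names here are hypothetical stand-ins.

```go
package main

import (
	"fmt"
	"sync"
)

// cmd is a stand-in for a pgwire command; "Sync" marks the end of an
// extended-protocol batch.
type cmd struct {
	name string
}

// stmtBuf is a tiny, hypothetical analogue of a statement buffer: a cursor
// over a queue of commands that is appended to concurrently.
type stmtBuf struct {
	mu struct {
		sync.Mutex
		data   []cmd
		curPos int
	}
}

// isNextCmdSync peeks at the command after the current one and reports
// whether it is a Sync. The cursor is left untouched, so the peek has no
// effect on subsequent reads. If the next command has not arrived yet, it
// conservatively returns false.
func (buf *stmtBuf) isNextCmdSync() bool {
	buf.mu.Lock()
	defer buf.mu.Unlock()
	next := buf.mu.curPos + 1
	if next >= len(buf.mu.data) {
		return false
	}
	return buf.mu.data[next].name == "Sync"
}

// canAutoCommit mirrors the predicate described above: in an implicit
// transaction we may auto-commit either in the simple protocol, or in the
// extended protocol when the statement is immediately followed by Sync.
func canAutoCommit(implicitTxn, extendedProtocol, nextIsSync bool) bool {
	return implicitTxn && (!extendedProtocol || nextIsSync)
}

func main() {
	var buf stmtBuf
	buf.mu.data = []cmd{{"Bind"}, {"Execute"}, {"Sync"}}
	buf.mu.curPos = 1 // currently executing the Execute command

	fmt.Println(canAutoCommit(true, true, buf.isNextCmdSync())) // true: 1PC is possible
	fmt.Println(canAutoCommit(true, true, false))               // false: more commands before Sync
}
```

The detail worth preserving from the patch is that the peek restores its position and defaults to false when the next command has not arrived, so a missing Sync can only make the executor more conservative, never auto-commit too early.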
diff --git a/pkg/sql/copy.go b/pkg/sql/copy.go index 8dabac401c64..d538188c1e1f 100644 --- a/pkg/sql/copy.go +++ b/pkg/sql/copy.go @@ -29,7 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" "github.com/cockroachdb/cockroach/pkg/sql/privilege" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -123,7 +122,7 @@ func newCopyMachine( format: n.Options.CopyFormat, txnOpt: txnOpt, // The planner will be prepared before use. - p: planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}}, + p: planner{execCfg: execCfg, alloc: &tree.DatumAlloc{}}, execInsertPlan: execInsertPlan, } diff --git a/pkg/sql/copy_file_upload.go b/pkg/sql/copy_file_upload.go index 7084cc03d68f..27df7093c166 100644 --- a/pkg/sql/copy_file_upload.go +++ b/pkg/sql/copy_file_upload.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/lexbase" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" @@ -89,7 +88,7 @@ func newFileUploadMachine( c := ©Machine{ conn: conn, // The planner will be prepared before use. - p: planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}}, + p: planner{execCfg: execCfg, alloc: &tree.DatumAlloc{}}, } f = &fileUploadMachine{ c: c, diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index fe0731496c89..ff3ef23a2e89 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -953,13 +953,13 @@ CREATE TABLE crdb_internal.node_statement_statistics ( exec_node_ids INT[] NOT NULL )`, populate: func(ctx context.Context, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - hasViewActivity, err := p.HasRoleOption(ctx, roleoption.VIEWACTIVITY) + hasViewActivityOrViewActivityRedacted, err := p.HasViewActivityOrViewActivityRedactedRole(ctx) if err != nil { return err } - if !hasViewActivity { + if !hasViewActivityOrViewActivityRedacted { return pgerror.Newf(pgcode.InsufficientPrivilege, - "user %s does not have %s privilege", p.User(), roleoption.VIEWACTIVITY) + "user %s does not have %s or %s privilege", p.User(), roleoption.VIEWACTIVITY, roleoption.VIEWACTIVITYREDACTED) } sqlStats, err := getSQLStats(p, "crdb_internal.node_statement_statistics") @@ -1089,14 +1089,15 @@ CREATE TABLE crdb_internal.node_transaction_statistics ( ) `, populate: func(ctx context.Context, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - hasViewActivity, err := p.HasRoleOption(ctx, roleoption.VIEWACTIVITY) + hasViewActivityOrhasViewActivityRedacted, err := p.HasViewActivityOrViewActivityRedactedRole(ctx) if err != nil { return err } - if !hasViewActivity { + if !hasViewActivityOrhasViewActivityRedacted { return pgerror.Newf(pgcode.InsufficientPrivilege, - "user %s does not have %s privilege", p.User(), roleoption.VIEWACTIVITY) + "user %s does not have %s or %s privilege", p.User(), roleoption.VIEWACTIVITY, roleoption.VIEWACTIVITYREDACTED) } + sqlStats, err := getSQLStats(p, "crdb_internal.node_transaction_statistics") if err != nil { return err @@ -1411,11 +1412,13 @@ CREATE TABLE crdb_internal.cluster_settings ( 
"crdb_internal.cluster_settings", roleoption.MODIFYCLUSTERSETTING) } } - for _, k := range settings.Keys() { + for _, k := range settings.Keys(p.ExecCfg().Codec.ForSystemTenant()) { if !hasAdmin && settings.AdminOnly(k) { continue } - setting, _ := settings.Lookup(k, settings.LookupForLocalAccess) + setting, _ := settings.Lookup( + k, settings.LookupForLocalAccess, p.ExecCfg().Codec.ForSystemTenant(), + ) strVal := setting.String(&p.ExecCfg().Settings.SV) isPublic := setting.Visibility() == settings.Public desc := setting.Description() @@ -1584,11 +1587,11 @@ func (p *planner) makeSessionsRequest(ctx context.Context) (serverpb.ListSession if hasAdmin { req.Username = "" } else { - hasViewActivity, err := p.HasRoleOption(ctx, roleoption.VIEWACTIVITY) + hasViewActivityOrhasViewActivityRedacted, err := p.HasViewActivityOrViewActivityRedactedRole(ctx) if err != nil { return serverpb.ListSessionsRequest{}, err } - if hasViewActivity { + if hasViewActivityOrhasViewActivityRedacted { req.Username = "" } } @@ -3848,7 +3851,7 @@ func addPartitioningRows( } colNames := tree.NewDString(buf.String()) - var datumAlloc rowenc.DatumAlloc + var datumAlloc tree.DatumAlloc // We don't need real prefixes in the DecodePartitionTuple calls because we // only use the tree.Datums part of the output. @@ -4311,7 +4314,7 @@ CREATE TABLE crdb_internal.predefined_comments ( populate: func( ctx context.Context, p *planner, dbContext catalog.DatabaseDescriptor, addRow func(...tree.Datum) error, ) error { - tableCommentKey := tree.NewDInt(keys.TableCommentType) + tableCommentKey := tree.NewDInt(tree.DInt(keys.TableCommentType)) vt := p.getVirtualTabler() vEntries := vt.getSchemas() vSchemaNames := vt.getSchemaNames() diff --git a/pkg/sql/create_index.go b/pkg/sql/create_index.go index 5f74856199be..284031d7d4c5 100644 --- a/pkg/sql/create_index.go +++ b/pkg/sql/create_index.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/geo/geoindex" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -87,11 +88,11 @@ func (p *planner) CreateIndex(ctx context.Context, n *tree.CreateIndex) (planNod return &createIndexNode{tableDesc: tableDesc, n: n}, nil } -// setupConstraintForShard adds a check constraint ensuring that the shard +// maybeSetupConstraintForShard adds a check constraint ensuring that the shard // column's value is within [0..ShardBuckets-1]. This method is called when a // `CREATE INDEX`/`ALTER PRIMARY KEY` statement is issued for the creation of a // sharded index that *does not* re-use a pre-existing shard column. 
-func (p *planner) setupConstraintForShard( +func (p *planner) maybeSetupConstraintForShard( ctx context.Context, tableDesc *tabledesc.Mutable, shardCol catalog.Column, buckets int32, ) error { // Assign an ID to the newly-added shard column, which is needed for the creation @@ -104,31 +105,26 @@ func (p *planner) setupConstraintForShard( if err != nil { return err } - info, err := tableDesc.GetConstraintInfo() + ckBuilder := schemaexpr.MakeCheckConstraintBuilder(ctx, p.tableName, tableDesc, &p.semaCtx) + ckDesc, err := ckBuilder.Build(ckDef) if err != nil { return err } - inuseNames := make(map[string]struct{}, len(info)) - for k := range info { - inuseNames[k] = struct{}{} - } - - ckBuilder := schemaexpr.MakeCheckConstraintBuilder(ctx, p.tableName, tableDesc, &p.semaCtx) - ckName, err := ckBuilder.DefaultName(ckDef.Expr) + curConstraintInfos, err := tableDesc.GetConstraintInfo() if err != nil { return err } // Avoid creating duplicate check constraints. - if _, ok := inuseNames[ckName]; !ok { - ck, err := ckBuilder.Build(ckDef) - if err != nil { - return err + for _, info := range curConstraintInfos { + if info.CheckConstraint != nil && info.CheckConstraint.Expr == ckDesc.Expr { + return nil } - ck.Validity = descpb.ConstraintValidity_Validating - tableDesc.AddCheckMutation(ck, descpb.DescriptorMutation_ADD) } + + ckDesc.Validity = descpb.ConstraintValidity_Validating + tableDesc.AddCheckMutation(ckDesc, descpb.DescriptorMutation_ADD) return nil } @@ -225,7 +221,7 @@ func makeIndexDescriptor( if tableDesc.IsLocalityRegionalByRow() { return nil, hashShardedIndexesOnRegionalByRowError() } - shardCol, newColumns, newColumn, err := setupShardedIndex( + shardCol, newColumns, err := setupShardedIndex( params.ctx, params.EvalContext(), ¶ms.p.semaCtx, @@ -239,10 +235,10 @@ func makeIndexDescriptor( return nil, err } columns = newColumns - if newColumn { - if err := params.p.setupConstraintForShard(params.ctx, tableDesc, shardCol, indexDesc.Sharded.ShardBuckets); err != nil { - return nil, err - } + if err := params.p.maybeSetupConstraintForShard( + params.ctx, tableDesc, shardCol, indexDesc.Sharded.ShardBuckets, + ); err != nil { + return nil, err } } @@ -480,9 +476,10 @@ var hashShardedIndexesDisabledError = pgerror.Newf(pgcode.FeatureNotSupported, "hash sharded indexes require the experimental_enable_hash_sharded_indexes session variable") // setupShardedIndex creates a shard column for the given index descriptor. It -// returns the shard column, the new column list for the index, and a boolean -// which is true if the shard column was newly created. If the shard column is -// new, it is added to tableDesc. +// returns the shard column and the new column list for the index. If the shard +// column is new, either of the following happens: +// (1) the column is added to tableDesc if it's a new table; +// (2) a column mutation is added to tableDesc if the table is not new. 
func setupShardedIndex( ctx context.Context, evalCtx *tree.EvalContext, @@ -493,9 +490,9 @@ func setupShardedIndex( tableDesc *tabledesc.Mutable, indexDesc *descpb.IndexDescriptor, isNewTable bool, -) (shard catalog.Column, newColumns tree.IndexElemList, newColumn bool, err error) { +) (shard catalog.Column, newColumns tree.IndexElemList, err error) { if !shardedIndexEnabled { - return nil, nil, false, hashShardedIndexesDisabledError + return nil, nil, hashShardedIndexesDisabledError } colNames := make([]string, 0, len(columns)) @@ -504,25 +501,26 @@ func setupShardedIndex( } buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, bucketsExpr) if err != nil { - return nil, nil, false, err + return nil, nil, err } - shardCol, newColumn, err := maybeCreateAndAddShardCol(int(buckets), tableDesc, + shardCol, err := maybeCreateAndAddShardCol(int(buckets), tableDesc, colNames, isNewTable) + if err != nil { - return nil, nil, false, err + return nil, nil, err } shardIdxElem := tree.IndexElem{ Column: tree.Name(shardCol.GetName()), Direction: tree.Ascending, } newColumns = append(tree.IndexElemList{shardIdxElem}, columns...) - indexDesc.Sharded = descpb.ShardedDescriptor{ + indexDesc.Sharded = catpb.ShardedDescriptor{ IsSharded: true, Name: shardCol.GetName(), ShardBuckets: buckets, ColumnNames: colNames, } - return shardCol, newColumns, newColumn, nil + return shardCol, newColumns, nil } // maybeCreateAndAddShardCol adds a new hidden computed shard column (or its mutation) to @@ -530,10 +528,10 @@ func setupShardedIndex( // buckets. func maybeCreateAndAddShardCol( shardBuckets int, desc *tabledesc.Mutable, colNames []string, isNewTable bool, -) (col catalog.Column, created bool, err error) { +) (col catalog.Column, err error) { shardColDesc, err := makeShardColumnDesc(colNames, shardBuckets) if err != nil { - return nil, false, err + return nil, err } existingShardCol, err := desc.FindColumnWithName(tree.Name(shardColDesc.Name)) if err == nil && !existingShardCol.Dropped() { @@ -543,14 +541,14 @@ func maybeCreateAndAddShardCol( if !existingShardCol.IsHidden() { // The user managed to reverse-engineer our crazy shard column name, so // we'll return an error here rather than try to be tricky. 
- return nil, false, pgerror.Newf(pgcode.DuplicateColumn, + return nil, pgerror.Newf(pgcode.DuplicateColumn, "column %s already specified; can't be used for sharding", shardColDesc.Name) } - return existingShardCol, false, nil + return existingShardCol, nil } columnIsUndefined := sqlerrors.IsUndefinedColumnError(err) if err != nil && !columnIsUndefined { - return nil, false, err + return nil, err } if columnIsUndefined || existingShardCol.Dropped() { if isNewTable { @@ -558,10 +556,9 @@ func maybeCreateAndAddShardCol( } else { desc.AddColumnMutation(shardColDesc, descpb.DescriptorMutation_ADD) } - created = true } shardCol, err := desc.FindColumnWithName(tree.Name(shardColDesc.Name)) - return shardCol, created, err + return shardCol, err } func (n *createIndexNode) startExec(params runParams) error { diff --git a/pkg/sql/create_role.go b/pkg/sql/create_role.go index 02b2bbcd7247..a84f97d93c3b 100644 --- a/pkg/sql/create_role.go +++ b/pkg/sql/create_role.go @@ -282,7 +282,7 @@ func (p *planner) checkPasswordAndGetHash( "Passwords must be %d characters or longer.", minLength) } - hashedPassword, err = security.HashPassword(ctx, password) + hashedPassword, err = security.HashPassword(ctx, &st.SV, password) if err != nil { return hashedPassword, err } diff --git a/pkg/sql/create_schema.go b/pkg/sql/create_schema.go index c268438913eb..9c0b595213d3 100644 --- a/pkg/sql/create_schema.go +++ b/pkg/sql/create_schema.go @@ -102,10 +102,43 @@ func CreateUserDefinedSchemaDescriptor( return nil, nil, err } + owner := user + if !n.AuthRole.Undefined() { + exists, err := RoleExists(ctx, execCfg, txn, authRole) + if err != nil { + return nil, nil, err + } + if !exists { + return nil, nil, pgerror.Newf(pgcode.UndefinedObject, "role/user %q does not exist", + n.AuthRole) + } + owner = authRole + } + + desc, privs, err := CreateSchemaDescriptorWithPrivileges(ctx, execCfg.DB, execCfg.Codec, db, schemaName, user, owner, allocateID) + if err != nil { + return nil, nil, err + } + + return desc, privs, nil +} + +// CreateSchemaDescriptorWithPrivileges creates a new schema descriptor with +// the provided name and privileges. +func CreateSchemaDescriptorWithPrivileges( + ctx context.Context, + kvDB *kv.DB, + codec keys.SQLCodec, + db catalog.DatabaseDescriptor, + schemaName string, + user, owner security.SQLUsername, + allocateID bool, +) (*schemadesc.Mutable, *descpb.PrivilegeDescriptor, error) { // Create the ID. var id descpb.ID + var err error if allocateID { - id, err = catalogkv.GenerateUniqueDescID(ctx, execCfg.DB, execCfg.Codec) + id, err = catalogkv.GenerateUniqueDescID(ctx, kvDB, codec) if err != nil { return nil, nil, err } @@ -120,19 +153,7 @@ func CreateUserDefinedSchemaDescriptor( db.GetPrivileges(), ) - if !n.AuthRole.Undefined() { - exists, err := RoleExists(ctx, execCfg, txn, authRole) - if err != nil { - return nil, nil, err - } - if !exists { - return nil, nil, pgerror.Newf(pgcode.UndefinedObject, "role/user %q does not exist", - n.AuthRole) - } - privs.SetOwner(authRole) - } else { - privs.SetOwner(user) - } + privs.SetOwner(owner) // Create the SchemaDescriptor. 
desc := schemadesc.NewBuilder(&descpb.SchemaDescriptor{ diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index a8a49046d010..5c57239f29a1 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -1290,9 +1290,11 @@ func NewTableDesc( if n.Locality != nil && regionConfig == nil && !opts.bypassLocalityOnNonMultiRegionDatabaseCheck { - return nil, pgerror.Newf( + return nil, errors.WithHint(pgerror.Newf( pgcode.InvalidTableDefinition, "cannot set LOCALITY on a table in a database that is not multi-region enabled", + ), + "database must first be multi-region enabled using ALTER DATABASE ... SET PRIMARY REGION ", ) } @@ -1485,7 +1487,7 @@ func NewTableDesc( if err != nil { return nil, err } - shardCol, _, err := maybeCreateAndAddShardCol(int(buckets), &desc, + shardCol, err := maybeCreateAndAddShardCol(int(buckets), &desc, []string{string(d.Name)}, true, /* isNewTable */ ) if err != nil { @@ -1597,7 +1599,7 @@ func NewTableDesc( if n.PartitionByTable.ContainsPartitions() { return nil, pgerror.New(pgcode.FeatureNotSupported, "sharded indexes don't support partitioning") } - shardCol, newColumns, newColumn, err := setupShardedIndex( + shardCol, newColumns, err := setupShardedIndex( ctx, evalCtx, semaCtx, @@ -1610,18 +1612,39 @@ func NewTableDesc( if err != nil { return nil, err } - if newColumn { - buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets) - if err != nil { - return nil, err - } - checkConstraint, err := makeShardCheckConstraintDef(int(buckets), shardCol) - if err != nil { - return nil, err + + buckets, err := tabledesc.EvalShardBucketCount(ctx, semaCtx, evalCtx, d.Sharded.ShardBuckets) + if err != nil { + return nil, err + } + checkConstraint, err := makeShardCheckConstraintDef(int(buckets), shardCol) + if err != nil { + return nil, err + } + + // If there is an equivalent check constraint from the CREATE TABLE (should + // be rare since we hide the constraint of shard column), we don't create a + // duplicate one. + ckBuilder := schemaexpr.MakeCheckConstraintBuilder(ctx, n.Table, &desc, semaCtx) + checkConstraintDesc, err := ckBuilder.Build(checkConstraint) + if err != nil { + return nil, err + } + for _, def := range n.Defs { + if inputCheckConstraint, ok := def.(*tree.CheckConstraintTableDef); ok { + inputCheckConstraintDesc, err := ckBuilder.Build(inputCheckConstraint) + if err != nil { + return nil, err + } + if checkConstraintDesc.Expr == inputCheckConstraintDesc.Expr { + return newColumns, nil + } } - n.Defs = append(n.Defs, checkConstraint) - cdd = append(cdd, nil) } + + n.Defs = append(n.Defs, checkConstraint) + cdd = append(cdd, nil) + return newColumns, nil } diff --git a/pkg/sql/delete.go b/pkg/sql/delete.go index 511ac2f5a873..0f8fad0ba5a2 100644 --- a/pkg/sql/delete.go +++ b/pkg/sql/delete.go @@ -177,10 +177,9 @@ func (d *deleteNode) processSourceRow(params runParams, sourceVals tree.Datums) // If result rows need to be accumulated, do it. if d.run.td.rows != nil { - // The new values can include all columns, the construction of the - // values has used execinfra.ScanVisibilityPublicAndNotPublic so the - // values may contain additional columns for every newly dropped column - // not visible. We do not want them to be available for RETURNING. + // The new values can include all columns, so the values may contain + // additional columns for every newly dropped column not visible. We do not + // want them to be available for RETURNING. 
// // d.run.rows.NumCols() is guaranteed to only contain the requested // public columns. diff --git a/pkg/sql/delete_preserving_index_test.go b/pkg/sql/delete_preserving_index_test.go index a780c133978d..583dbae792a8 100644 --- a/pkg/sql/delete_preserving_index_test.go +++ b/pkg/sql/delete_preserving_index_test.go @@ -126,7 +126,7 @@ func TestDeletePreservingIndexEncoding(t *testing.T) { } // Grab the revision histories for both indices. - prefix := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc, index.ID) + prefix := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc.GetID(), index.ID) prefixEnd := append(prefix, []byte("\xff")...) revisions, err := kvclient.GetAllRevisions(context.Background(), kvDB, prefix, prefixEnd, now, end) diff --git a/pkg/sql/delete_range.go b/pkg/sql/delete_range.go index 82a3b0945a2e..cdfef904de9f 100644 --- a/pkg/sql/delete_range.go +++ b/pkg/sql/delete_range.go @@ -99,8 +99,7 @@ func (d *deleteRangeNode) startExec(params runParams) error { false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ params.p.alloc, nil, /* memMonitor */ table, diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 38586e3cd3b1..0fa75216f7d6 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" @@ -323,9 +324,7 @@ func TranslateDataPlacement(g tree.DataPlacement) (descpb.DataPlacement, error) } } -func (p *planner) checkRegionIsCurrentlyActive( - ctx context.Context, region descpb.RegionName, -) error { +func (p *planner) checkRegionIsCurrentlyActive(ctx context.Context, region catpb.RegionName) error { liveRegions, err := p.getLiveClusterRegions(ctx) if err != nil { return err @@ -342,7 +341,7 @@ var InitializeMultiRegionMetadataCCL = func( execCfg *ExecutorConfig, liveClusterRegions LiveClusterRegions, survivalGoal tree.SurvivalGoal, - primaryRegion descpb.RegionName, + primaryRegion catpb.RegionName, regions []tree.Name, dataPlacement tree.DataPlacement, ) (*multiregion.RegionConfig, error) { @@ -396,7 +395,7 @@ func (p *planner) maybeInitializeMultiRegionMetadata( p.ExecCfg(), liveRegions, survivalGoal, - descpb.RegionName(primaryRegion), + catpb.RegionName(primaryRegion), regions, placement, ) diff --git a/pkg/sql/distsql/columnar_operators_test.go b/pkg/sql/distsql/columnar_operators_test.go index 6bbbab9ceb50..0cf193b5f663 100644 --- a/pkg/sql/distsql/columnar_operators_test.go +++ b/pkg/sql/distsql/columnar_operators_test.go @@ -122,7 +122,7 @@ func TestAggregatorAgainstProcessor(t *testing.T) { groupingCols[i] = i orderingCols[i].ColIdx = i } - var da rowenc.DatumAlloc + var da tree.DatumAlloc // We need +1 because an entry for index=6 was omitted by mistake. 
numSupportedAggFns := len(execinfrapb.AggregatorSpec_Func_name) + 1 @@ -344,7 +344,7 @@ func TestAggregatorAgainstProcessor(t *testing.T) { func TestDistinctAgainstProcessor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - var da rowenc.DatumAlloc + var da tree.DatumAlloc evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) @@ -547,7 +547,7 @@ func TestSorterAgainstProcessor(t *testing.T) { func TestSortChunksAgainstProcessor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - var da rowenc.DatumAlloc + var da tree.DatumAlloc st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(context.Background()) @@ -817,7 +817,7 @@ func generateEqualityColumns(rng *rand.Rand, nCols int, nEqCols int) []uint32 { func TestMergeJoinerAgainstProcessor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - var da rowenc.DatumAlloc + var da tree.DatumAlloc evalCtx := tree.MakeTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index 96e1bcd8b801..144b22d61725 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -408,8 +408,19 @@ func (ds *ServerImpl) setupFlow( // that have no remote flows and also no concurrency, the txn comes from // localState.Txn. Otherwise, we create a txn based on the request's // LeafTxnInputState. + useLeaf := false + for _, proc := range req.Flow.Processors { + if jr := proc.Core.JoinReader; jr != nil { + if !jr.MaintainOrdering && jr.IsIndexJoin() { + // Index joins when ordering doesn't have to be maintained are + // executed via the Streamer API that has concurrency. + useLeaf = true + break + } + } + } var txn *kv.Txn - if localState.IsLocal && !f.ConcurrentTxnUse() { + if localState.IsLocal && !f.ConcurrentTxnUse() && !useLeaf { txn = localState.Txn } else { // If I haven't created the leaf already, do it now. @@ -468,21 +479,13 @@ func (ds *ServerImpl) newFlowContext( // If we were passed a descs.Collection to use, then take it. In this case, // the caller will handle releasing the used descriptors, so we don't need // to cleanup the descriptors when cleaning up the flow. - flowCtx.TypeResolverFactory = &descs.DistSQLTypeResolverFactory{ - Descriptors: localState.Collection, - CleanupFunc: func(ctx context.Context) {}, - } + flowCtx.Descriptors = localState.Collection } else { // If we weren't passed a descs.Collection, then make a new one. We are // responsible for cleaning it up and releasing any accessed descriptors // on flow cleanup. 
- collection := ds.CollectionFactory.NewCollection(descs.NewTemporarySchemaProvider(evalCtx.SessionDataStack)) - flowCtx.TypeResolverFactory = &descs.DistSQLTypeResolverFactory{ - Descriptors: collection, - CleanupFunc: func(ctx context.Context) { - collection.ReleaseAll(ctx) - }, - } + flowCtx.Descriptors = ds.CollectionFactory.NewCollection(descs.NewTemporarySchemaProvider(evalCtx.SessionDataStack)) + flowCtx.IsDescriptorsCleanupRequired = true } return flowCtx } diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 3ae1e633b7a6..4fd5757a5151 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -193,6 +193,11 @@ func (dsp *DistSQLPlanner) shouldPlanTestMetadata() bool { return dsp.distSQLSrv.TestingKnobs.MetadataTestLevel >= dsp.metadataTestTolerance } +// GetNodeInfo gets a node descriptor by node ID. +func (dsp *DistSQLPlanner) GetNodeInfo(nodeID roachpb.NodeID) (*roachpb.NodeDescriptor, error) { + return dsp.nodeDescs.GetNodeDescriptor(nodeID) +} + // SetNodeInfo sets the planner's node descriptor. // The first call to SetNodeInfo leads to the construction of the SpanResolver. func (dsp *DistSQLPlanner) SetNodeInfo(desc roachpb.NodeDescriptor) { @@ -1102,17 +1107,18 @@ func getIndexIdx(index catalog.Index, desc catalog.TableDescriptor) (uint32, err func initTableReaderSpec( n *scanNode, ) (*execinfrapb.TableReaderSpec, execinfrapb.PostProcessSpec, error) { + if n.isCheck { + return nil, execinfrapb.PostProcessSpec{}, errors.AssertionFailedf("isCheck no longer supported") + } s := physicalplan.NewTableReaderSpec() *s = execinfrapb.TableReaderSpec{ Table: *n.desc.TableDesc(), Reverse: n.reverse, - IsCheck: n.isCheck, - Visibility: n.colCfg.visibility, LockingStrength: n.lockingStrength, LockingWaitPolicy: n.lockingWaitPolicy, HasSystemColumns: n.containsSystemColumns, } - if vc := getInvertedColumn(n.colCfg.invertedColumn, n.cols); vc != nil { + if vc := getInvertedColumn(n.colCfg.invertedColumnID, n.cols); vc != nil { s.InvertedColumn = vc.ColumnDesc() } @@ -1122,13 +1128,6 @@ func initTableReaderSpec( } s.IndexIdx = indexIdx - // When a TableReader is running scrub checks, do not allow a - // post-processor. This is because the outgoing stream is a fixed - // format (rowexec.ScrubTypes). - if n.isCheck { - return s, execinfrapb.PostProcessSpec{}, nil - } - var post execinfrapb.PostProcessSpec if n.hardLimit != 0 { post.Limit = uint64(n.hardLimit) @@ -1139,19 +1138,14 @@ func initTableReaderSpec( } // getInvertedColumn returns the column in cols with ID matching -// invertedColumn.colID. -func getInvertedColumn( - invertedColumn *struct { - colID tree.ColumnID - typ *types.T - }, cols []catalog.Column, -) catalog.Column { - if invertedColumn == nil { +// invertedColumnID. +func getInvertedColumn(invertedColumnID tree.ColumnID, cols []catalog.Column) catalog.Column { + if invertedColumnID == 0 { return nil } for i := range cols { - if tree.ColumnID(cols[i].GetID()) == invertedColumn.colID { + if cols[i].GetID() == invertedColumnID { return cols[i] } } @@ -1159,33 +1153,20 @@ func getInvertedColumn( } // tableOrdinal returns the index of a column with the given ID. 
-func tableOrdinal( - desc catalog.TableDescriptor, colID descpb.ColumnID, visibility execinfrapb.ScanVisibility, -) int { +func tableOrdinal(desc catalog.TableDescriptor, colID descpb.ColumnID) int { col, _ := desc.FindColumnWithID(colID) - if col != nil && (col.IsSystemColumn() || visibility == execinfra.ScanVisibilityPublicAndNotPublic || col.Public()) { - return col.Ordinal() + if col == nil { + panic(errors.AssertionFailedf("column %d not in desc.Columns", colID)) } - - panic(errors.AssertionFailedf("column %d not in desc.Columns", colID)) -} - -func highestTableOrdinal(desc catalog.TableDescriptor, visibility execinfrapb.ScanVisibility) int { - highest := len(desc.PublicColumns()) - 1 - if visibility == execinfra.ScanVisibilityPublicAndNotPublic { - highest = len(desc.DeletableColumns()) - 1 - } - return highest + return col.Ordinal() } // toTableOrdinals returns a mapping from column ordinals in cols to table // reader column ordinals. -func toTableOrdinals( - cols []catalog.Column, desc catalog.TableDescriptor, visibility execinfrapb.ScanVisibility, -) []int { +func toTableOrdinals(cols []catalog.Column, desc catalog.TableDescriptor) []int { res := make([]int, len(cols)) for i := range res { - res[i] = tableOrdinal(desc, cols[i].GetID(), visibility) + res[i] = tableOrdinal(desc, cols[i].GetID()) } return res } @@ -1298,12 +1279,9 @@ func (dsp *DistSQLPlanner) CheckNodeHealthAndVersion( func (dsp *DistSQLPlanner) createTableReaders( planCtx *PlanningCtx, n *scanNode, ) (*PhysicalPlan, error) { - if n.colCfg.addUnwantedAsHidden { - panic("addUnwantedAsHidden not supported") - } // scanNodeToTableOrdinalMap is a map from scan node column ordinal to // table reader column ordinal. - scanNodeToTableOrdinalMap := toTableOrdinals(n.cols, n.desc, n.colCfg.visibility) + scanNodeToTableOrdinalMap := toTableOrdinals(n.cols, n.desc) spec, post, err := initTableReaderSpec(n) if err != nil { return nil, err @@ -1319,7 +1297,6 @@ func (dsp *DistSQLPlanner) createTableReaders( desc: n.desc, spans: n.spans, reverse: n.reverse, - scanVisibility: n.colCfg.visibility, parallelize: n.parallelize, estimatedRowCount: n.estimatedRowCount, reqOrdering: n.reqOrdering, @@ -1340,7 +1317,6 @@ type tableReaderPlanningInfo struct { desc catalog.TableDescriptor spans []roachpb.Span reverse bool - scanVisibility execinfrapb.ScanVisibility parallelize bool estimatedRowCount uint64 reqOrdering ReqOrdering @@ -1525,11 +1501,7 @@ func (dsp *DistSQLPlanner) planTableReaders( } invertedColumn := tabledesc.FindInvertedColumn(info.desc, info.spec.InvertedColumn) - cols := info.desc.PublicColumns() - returnMutations := info.scanVisibility == execinfra.ScanVisibilityPublicAndNotPublic - if returnMutations { - cols = info.desc.DeletableColumns() - } + cols := info.desc.DeletableColumns() typs := catalog.ColumnTypesWithInvertedCol(cols, invertedColumn) if info.containsSystemColumns { for _, col := range info.desc.SystemColumns() { @@ -1545,16 +1517,10 @@ func (dsp *DistSQLPlanner) planTableReaders( var descColumnIDs util.FastIntMap colID := 0 for _, col := range info.desc.AllColumns() { - var addCol bool - if col.IsSystemColumn() { - // If it is a system column, we want to treat it carefully because - // its ID is a very large number, so adding it into util.FastIntMap - // will incur an allocation. 
- addCol = info.containsSystemColumns - } else { - addCol = col.Public() || returnMutations - } - if addCol { + // If it is a system column, we want to treat it carefully because + // its ID is a very large number, so adding it into util.FastIntMap + // will incur an allocation. + if info.containsSystemColumns || !col.IsSystemColumn() { descColumnIDs.Set(colID, int(col.GetID())) colID++ } @@ -2264,7 +2230,6 @@ func (dsp *DistSQLPlanner) createPlanForIndexJoin( Table: *n.table.desc.TableDesc(), IndexIdx: 0, Type: descpb.InnerJoin, - Visibility: n.table.colCfg.visibility, LockingStrength: n.table.lockingStrength, LockingWaitPolicy: n.table.lockingWaitPolicy, MaintainOrdering: len(n.reqOrdering) > 0, @@ -2280,7 +2245,7 @@ func (dsp *DistSQLPlanner) createPlanForIndexJoin( plan.PlanToStreamColMap = identityMap(plan.PlanToStreamColMap, len(n.cols)) for i := range n.cols { - ord := tableOrdinal(n.table.desc, n.cols[i].GetID(), n.table.colCfg.visibility) + ord := tableOrdinal(n.table.desc, n.cols[i].GetID()) post.OutputColumns[i] = uint32(ord) } @@ -2320,7 +2285,6 @@ func (dsp *DistSQLPlanner) createPlanForLookupJoin( joinReaderSpec := execinfrapb.JoinReaderSpec{ Table: *n.table.desc.TableDesc(), Type: n.joinType, - Visibility: n.table.colCfg.visibility, LockingStrength: n.table.lockingStrength, LockingWaitPolicy: n.table.lockingWaitPolicy, MaintainOrdering: len(n.reqOrdering) > 0, @@ -2434,13 +2398,12 @@ func mappingHelperForLookupJoins( } for i := range table.cols { outTypes[numLeftCols+i] = table.cols[i].GetType() - ord := tableOrdinal(table.desc, table.cols[i].GetID(), table.colCfg.visibility) + ord := tableOrdinal(table.desc, table.cols[i].GetID()) post.OutputColumns[numLeftCols+i] = uint32(numLeftCols + ord) } if addContinuationCol { outTypes[numOutCols-1] = types.Bool - post.OutputColumns[numOutCols-1] = - uint32(numLeftCols + highestTableOrdinal(table.desc, table.colCfg.visibility) + 1) + post.OutputColumns[numOutCols-1] = uint32(numLeftCols + len(table.desc.DeletableColumns())) } // Map the columns of the lookupJoinNode to the result streams of the @@ -2652,7 +2615,7 @@ func (dsp *DistSQLPlanner) planZigzagJoin( // colCfg.wantedColumns for only the necessary columns in // opt/exec/execbuilder/relational_builder.go, similar to lookup joins. for _, col := range side.cols { - ord := tableOrdinal(side.desc, col.GetID(), execinfra.ScanVisibilityPublic) + ord := tableOrdinal(side.desc, col.GetID()) post.OutputColumns[i] = uint32(colOffset + ord) types[i] = col.GetType() planToStreamColMap[i] = i @@ -3297,7 +3260,7 @@ func (dsp *DistSQLPlanner) createValuesPlan( func (dsp *DistSQLPlanner) createValuesSpecFromTuples( planCtx *PlanningCtx, tuples [][]tree.TypedExpr, resultTypes []*types.T, ) (*execinfrapb.ValuesCoreSpec, error) { - var a rowenc.DatumAlloc + var a tree.DatumAlloc evalCtx := &planCtx.ExtendedEvalCtx.EvalContext numRows := len(tuples) if len(resultTypes) == 0 { @@ -4044,7 +4007,6 @@ func checkScanParallelizationIfLocal( } return true, nil case *scanNode: - prohibitParallelization = n.isCheck if len(n.reqOrdering) == 0 && n.parallelize { hasScanNodeToParallelize = true } diff --git a/pkg/sql/distsql_plan_scrub_physical.go b/pkg/sql/distsql_plan_scrub_physical.go deleted file mode 100644 index 0fc2ef81d61f..000000000000 --- a/pkg/sql/distsql_plan_scrub_physical.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package sql - -import ( - "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" - "github.com/cockroachdb/cockroach/pkg/sql/rowexec" -) - -// createScrubPhysicalCheck generates a plan for running a physical -// check for an index. The plan consists of TableReaders, with IsCheck -// enabled, that scan an index span. By having IsCheck enabled, the -// TableReaders will only emit errors encountered during scanning -// instead of row data. The plan is finalized. -func (dsp *DistSQLPlanner) createScrubPhysicalCheck( - planCtx *PlanningCtx, n *scanNode, -) (*PhysicalPlan, error) { - spec, _, err := initTableReaderSpec(n) - if err != nil { - return nil, err - } - - spanPartitions, err := dsp.PartitionSpans(planCtx, n.spans) - if err != nil { - return nil, err - } - - corePlacement := make([]physicalplan.ProcessorCorePlacement, len(spanPartitions)) - for i, sp := range spanPartitions { - tr := &execinfrapb.TableReaderSpec{} - *tr = *spec - tr.Spans = sp.Spans - - corePlacement[i].NodeID = sp.Node - corePlacement[i].Core.TableReader = tr - } - - p := planCtx.NewPhysicalPlan() - p.AddNoInputStage(corePlacement, execinfrapb.PostProcessSpec{}, rowexec.ScrubTypes, execinfrapb.Ordering{}) - p.PlanToStreamColMap = identityMapInPlace(make([]int, len(rowexec.ScrubTypes))) - - dsp.FinalizePlan(planCtx, p) - return p, nil -} diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index af8cbc5d9f3e..5968644ee50e 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -71,7 +71,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( for _, c := range s.columns { if !tableColSet.Contains(c) { tableColSet.Add(c) - colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(c)) + colCfg.wantedColumns = append(colCfg.wantedColumns, c) } } } diff --git a/pkg/sql/distsql_running.go b/pkg/sql/distsql_running.go index 5116625b4eef..e61ca87a4762 100644 --- a/pkg/sql/distsql_running.go +++ b/pkg/sql/distsql_running.go @@ -447,6 +447,16 @@ func (dsp *DistSQLPlanner) Run( localState.HasConcurrency = localState.HasConcurrency || execinfra.HasParallelProcessors(flow) } } + for _, proc := range plan.Processors { + if js := proc.Spec.Core.JoinReader; js != nil { + if !js.MaintainOrdering && js.IsIndexJoin() { + // Index joins when ordering doesn't have to be maintained + // are executed via the Streamer API that has concurrency. + localState.HasConcurrency = true + break + } + } + } } if localState.MustUseLeafTxn() && txn != nil { // Set up leaf txns using the txnCoordMeta if we need to. 
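The distsql/server.go and distsql_running.go hunks above both special-case join readers that perform index joins without MaintainOrdering: such processors run their lookups through a concurrent streaming path, so the flow must assume concurrency and use a leaf transaction instead of sharing the root transaction. A minimal sketch of that detection loop follows, with hypothetical spec types standing in for the execinfrapb protos.

```go
package main

import "fmt"

// joinReaderSpec and processorSpec are hypothetical stand-ins for the
// processor specs referenced in the hunks above.
type joinReaderSpec struct {
	MaintainOrdering   bool
	LookupColumnsEmpty bool // an index join has no lookup columns
}

// isIndexJoin reports whether this join reader is a plain index join.
func (jr *joinReaderSpec) isIndexJoin() bool { return jr.LookupColumnsEmpty }

type processorSpec struct {
	JoinReader *joinReaderSpec // nil when the processor core is not a join reader
}

// needsLeafTxn reports whether any processor in the flow is an index join
// that does not need to maintain ordering. Such joins run lookups
// concurrently, so the flow cannot reuse the root txn and must switch to a
// leaf txn.
func needsLeafTxn(procs []processorSpec) bool {
	for i := range procs {
		if jr := procs[i].JoinReader; jr != nil {
			if !jr.MaintainOrdering && jr.isIndexJoin() {
				return true
			}
		}
	}
	return false
}

func main() {
	flow := []processorSpec{
		{}, // e.g. a table reader
		{JoinReader: &joinReaderSpec{MaintainOrdering: false, LookupColumnsEmpty: true}},
	}
	fmt.Println(needsLeafTxn(flow)) // true: plan for concurrency, use a leaf txn
}
```

Both call sites in the patch apply the same predicate: one decides whether setupFlow may reuse localState.Txn, the other sets localState.HasConcurrency before the flow is run.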
@@ -615,7 +625,7 @@ type DistSQLReceiver struct { row tree.Datums status execinfra.ConsumerStatus - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc closed bool rangeCache *rangecache.RangeCache diff --git a/pkg/sql/distsql_spec_exec_factory.go b/pkg/sql/distsql_spec_exec_factory.go index 69292fdcd356..2ae20cf4c9b6 100644 --- a/pkg/sql/distsql_spec_exec_factory.go +++ b/pkg/sql/distsql_spec_exec_factory.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" - "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/span" @@ -230,16 +229,14 @@ func (e *distSQLSpecExecFactory) ConstructScan( // Phase 2: perform the table reader planning. This phase is equivalent to // what DistSQLPlanner.createTableReaders does. - colsToTableOrdinalMap := toTableOrdinals(cols, tabDesc, colCfg.visibility) + colsToTableOrdinalMap := toTableOrdinals(cols, tabDesc) trSpec := physicalplan.NewTableReaderSpec() *trSpec = execinfrapb.TableReaderSpec{ Table: *tabDesc.TableDesc(), Reverse: params.Reverse, - IsCheck: false, - Visibility: colCfg.visibility, HasSystemColumns: scanContainsSystemColumns(&colCfg), } - if vc := getInvertedColumn(colCfg.invertedColumn, cols); vc != nil { + if vc := getInvertedColumn(colCfg.invertedColumnID, cols); vc != nil { trSpec.InvertedColumn = vc.ColumnDesc() } @@ -278,7 +275,6 @@ func (e *distSQLSpecExecFactory) ConstructScan( desc: tabDesc, spans: spans, reverse: params.Reverse, - scanVisibility: colCfg.visibility, parallelize: params.Parallelize, estimatedRowCount: uint64(params.EstimatedRowCount), reqOrdering: ReqOrdering(reqOrdering), @@ -692,11 +688,7 @@ func (e *distSQLSpecExecFactory) constructZigzagJoinSide( desc := table.(*optTable).desc colCfg := scanColumnsConfig{wantedColumns: make([]tree.ColumnID, 0, wantedCols.Len())} for c, ok := wantedCols.Next(0); ok; c, ok = wantedCols.Next(c + 1) { - colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(desc.PublicColumns()[c].GetID())) - } - ctx := e.planner.extendedEvalCtx.Ctx() - if err := e.planner.CheckPrivilege(ctx, desc, privilege.SELECT); err != nil { - return zigzagPlanningSide{}, err + colCfg.wantedColumns = append(colCfg.wantedColumns, desc.PublicColumns()[c].GetID()) } cols, err := initColsForScan(desc, colCfg) if err != nil { diff --git a/pkg/sql/drop_index.go b/pkg/sql/drop_index.go index a3c27fdd3a25..b77fbb971faf 100644 --- a/pkg/sql/drop_index.go +++ b/pkg/sql/drop_index.go @@ -16,6 +16,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvclient" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -520,7 +521,13 @@ func (p *planner) dropIndexByName( } tableDesc.RemovePublicNonPrimaryIndex(idxOrdinal) - if err := p.removeIndexComment(ctx, tableDesc.ID, idxDesc.ID); err != nil { + commentUpdater := p.execCfg.CommentUpdaterFactory.NewCommentUpdater( + ctx, + p.txn, + p.SessionData(), + ) + if err := commentUpdater.DeleteDescriptorComment( + int64(tableDesc.ID), int64(idxDesc.ID), keys.IndexCommentType); err != nil { return err } diff --git a/pkg/sql/drop_schema.go b/pkg/sql/drop_schema.go index 0708f2d86bc4..94c593122b57 100644 --- a/pkg/sql/drop_schema.go +++ 
b/pkg/sql/drop_schema.go @@ -102,7 +102,7 @@ func (p *planner) DropSchema(ctx context.Context, n *tree.DropSchema) (planNode, } if !(isAdmin || hasOwnership) { return nil, pgerror.Newf(pgcode.InsufficientPrivilege, - "permission denied to drop schema %q", sc.GetName()) + "must be owner of schema %q", sc.GetName()) } namesBefore := len(d.objectNamesToDelete) if err := d.collectObjectsInSchema(ctx, p, db, sc); err != nil { diff --git a/pkg/sql/event_log.go b/pkg/sql/event_log.go index 442f75145788..239c7f34478f 100644 --- a/pkg/sql/event_log.go +++ b/pkg/sql/event_log.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -239,17 +238,6 @@ func logEventInternalForSchemaChanges( ) } -// makeCommonSQLEventDetails creates a common exec event -// payload. -func makeCommonSQLEventDetails( - userName string, stmt string, appName string, -) *eventpb.CommonSQLEventDetails { - - return &eventpb.CommonSQLEventDetails{ApplicationName: appName, - User: userName, - Statement: redact.RedactableString(stmt)} -} - // logEventInternalForSQLStatements emits a cluster event on behalf of // a SQL statement, when the point where the event is emitted does not // have access to a (*planner) and the current statement metadata. @@ -319,16 +307,18 @@ func NewSchemaChangerEventLogger( // LogEvent implements the scexec.EventLogger interface. func (l schemaChangerEventLogger) LogEvent( - ctx context.Context, descID descpb.ID, metadata scpb.ElementMetadata, event eventpb.EventPayload, + ctx context.Context, + descID descpb.ID, + details eventpb.CommonSQLEventDetails, + event eventpb.EventPayload, ) error { entry := eventLogEntry{targetID: int32(descID), event: event} - commonPayload := makeCommonSQLEventDetails(metadata.Username, metadata.Statement, metadata.AppName) return logEventInternalForSQLStatements(ctx, l.execCfg, l.txn, l.depth, eventLogOptions{dst: LogEverywhere}, - *commonPayload, + details, entry) } diff --git a/pkg/sql/exec_factory_util.go b/pkg/sql/exec_factory_util.go index 758ad141b59a..5a5b4d586ccb 100644 --- a/pkg/sql/exec_factory_util.go +++ b/pkg/sql/exec_factory_util.go @@ -15,8 +15,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" @@ -91,33 +89,18 @@ func constructPlan( // list of descriptor IDs for columns in the given cols set. Columns are // identified by their ordinal position in the table schema. func makeScanColumnsConfig(table cat.Table, cols exec.TableColumnOrdinalSet) scanColumnsConfig { - // Set visibility=execinfra.ScanVisibilityPublicAndNotPublic, since all - // columns in the "cols" set should be projected, regardless of whether - // they're public or non-public. The caller decides which columns to - // include (or not include). Note that when wantedColumns is non-empty, - // the visibility flag will never trigger the addition of more columns. 
colCfg := scanColumnsConfig{ - wantedColumns: make([]tree.ColumnID, 0, cols.Len()), - wantedColumnsOrdinals: make([]uint32, 0, cols.Len()), - visibility: execinfra.ScanVisibilityPublicAndNotPublic, + wantedColumns: make([]tree.ColumnID, 0, cols.Len()), } for ord, ok := cols.Next(0); ok; ord, ok = cols.Next(ord + 1) { col := table.Column(ord) - colOrd := ord if col.Kind() == cat.Inverted { - typ := col.DatumType() - colOrd = col.InvertedSourceColumnOrdinal() + colCfg.invertedColumnType = col.DatumType() + colOrd := col.InvertedSourceColumnOrdinal() col = table.Column(colOrd) - colCfg.invertedColumn = &struct { - colID tree.ColumnID - typ *types.T - }{ - colID: tree.ColumnID(col.ColID()), - typ: typ, - } + colCfg.invertedColumnID = tree.ColumnID(col.ColID()) } colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(col.ColID())) - colCfg.wantedColumnsOrdinals = append(colCfg.wantedColumnsOrdinals, uint32(colOrd)) } return colCfg } @@ -297,7 +280,7 @@ func constructVirtualScan( func scanContainsSystemColumns(colCfg *scanColumnsConfig) bool { for _, id := range colCfg.wantedColumns { - if colinfo.IsColIDSystemColumn(descpb.ColumnID(id)) { + if colinfo.IsColIDSystemColumn(id) { return true } } diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 5121d85d271a..7d2d69e82a69 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -25,7 +25,7 @@ import ( "strings" "time" - apd "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/col/coldata" @@ -185,12 +185,15 @@ var allowCrossDatabaseSeqReferences = settings.RegisterBoolSetting( false, ).WithPublic() -const secondaryTenantsZoneConfigsEnabledSettingName = "sql.zone_configs.experimental_allow_for_secondary_tenant.enabled" +const secondaryTenantsZoneConfigsEnabledSettingName = "sql.zone_configs.allow_for_secondary_tenant.enabled" // secondaryTenantZoneConfigsEnabled controls if secondary tenants are allowed // to set zone configurations. It has no effect for the system tenant. // // This setting has no effect on zone configurations that have already been set. +// +// TODO(irfansharif): This should be a tenant-readonly setting, possible after +// the work for #73349 is completed. var secondaryTenantZoneConfigsEnabled = settings.RegisterBoolSetting( settings.TenantWritable, secondaryTenantsZoneConfigsEnabledSettingName, @@ -1202,6 +1205,9 @@ type ExecutorConfig struct { // IndexValidator is used to validate indexes. IndexValidator scexec.IndexValidator + // CommentUpdaterFactory is used to issue queries for updating comments. + CommentUpdaterFactory scexec.CommentUpdaterFactory + // ContentionRegistry is a node-level registry of contention events used for // contention observability. ContentionRegistry *contention.Registry @@ -1226,9 +1232,9 @@ type ExecutorConfig struct { // CollectionFactory is used to construct a descs.Collection. CollectionFactory *descs.CollectionFactory - // SpanConfigReconciliationJobDeps are used to drive the span config - // reconciliation job. - SpanConfigReconciliationJobDeps spanconfig.ReconciliationDependencies + // SpanConfigReconciler is used to drive the span config reconciliation job + // and related migrations. + SpanConfigReconciler spanconfig.Reconciler // SpanConfigKVAccessor is used when creating and deleting tenant // records. 
@@ -3095,6 +3101,8 @@ func scrubStmtStatKey(vt VirtualTabler, key string) (string, bool) { return f.CloseAndGetString(), true } +// formatStmtKeyAsRedactableString, given an AST node, fully qualifies names +// using annotations and formats the statement as a redactable string. func formatStmtKeyAsRedactableString( vt VirtualTabler, rootAST tree.Statement, ann *tree.Annotations, ) redact.RedactableString { diff --git a/pkg/sql/execinfra/BUILD.bazel b/pkg/sql/execinfra/BUILD.bazel index a66e7b4d930d..7e181c2551aa 100644 --- a/pkg/sql/execinfra/BUILD.bazel +++ b/pkg/sql/execinfra/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "outboxbase.go", "processorsbase.go", "readerbase.go", - "scanbase.go", "server_config.go", "stats.go", "testutils.go", @@ -32,6 +31,7 @@ go_library( "//pkg/jobs", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvclient/kvcoord:with-mocks", "//pkg/kv/kvclient/rangecache:with-mocks", "//pkg/kv/kvserver/diskmap", "//pkg/kv/kvserver/kvserverbase", @@ -40,11 +40,14 @@ go_library( "//pkg/rpc", "//pkg/rpc/nodedialer", "//pkg/settings/cluster", + "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/catalog/tabledesc", "//pkg/sql/execinfrapb", "//pkg/sql/row", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlliveness", @@ -82,6 +85,7 @@ go_test( "main_test.go", ], embed = [":execinfra"], + tags = ["no-remote"], deps = [ "//pkg/security", "//pkg/security/securitytest", diff --git a/pkg/sql/execinfra/flow_context.go b/pkg/sql/execinfra/flow_context.go index f653d7f55a48..06d37391c88d 100644 --- a/pkg/sql/execinfra/flow_context.go +++ b/pkg/sql/execinfra/flow_context.go @@ -16,7 +16,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -53,6 +56,20 @@ type FlowCtx struct { // higher-level txn (like backfills). Txn *kv.Txn + // Descriptors is used to look up leased table descriptors and to construct + // transaction bound TypeResolvers to resolve type references during flow + // setup. It is not safe for concurrent use and is intended to be used only + // during flow setup and initialization. The Descriptors object is initialized + // when the FlowContext is created on the gateway node using the planner's + // descs.Collection and is created on remote nodes with a new descs.Collection. + // In the latter case, after the flow is complete, all descriptors leased from + // this object must be released. + Descriptors *descs.Collection + + // IsDescriptorsCleanupRequired is set if Descriptors needs to release the + // leases it acquired after the flow is complete. + IsDescriptorsCleanupRequired bool + // nodeID is the ID of the node on which the processors using this FlowCtx // run. NodeID *base.SQLIDContainer @@ -69,15 +86,6 @@ type FlowCtx struct { // Gateway is true if this flow is being run on the gateway node. Gateway bool - // TypeResolverFactory is used to construct transaction bound TypeResolvers - // to resolve type references during flow setup. 
It is not safe for concurrent - // use and is intended to be used only during flow setup and initialization. - // The TypeResolverFactory is initialized when the FlowContext is created - // on the gateway node using the planner's descs.Collection and is created - // on remote nodes with a new descs.Collection. After the flow is complete, - // all descriptors leased from the factory must be released. - TypeResolverFactory *descs.DistSQLTypeResolverFactory - // DiskMonitor is this flow's disk monitor. All disk usage for this flow must // be registered through this monitor. DiskMonitor *mon.BytesMonitor @@ -114,6 +122,40 @@ func (ctx *FlowCtx) Codec() keys.SQLCodec { return ctx.EvalCtx.Codec } +// TableDescriptor returns a catalog.TableDescriptor object for the given +// descriptor proto, using the descriptors collection if it is available. +func (ctx *FlowCtx) TableDescriptor(desc *descpb.TableDescriptor) catalog.TableDescriptor { + if desc == nil { + return nil + } + if ctx != nil && ctx.Descriptors != nil && ctx.Txn != nil { + leased, _ := ctx.Descriptors.GetLeasedImmutableTableByID(ctx.EvalCtx.Ctx(), ctx.Txn, desc.ID) + if leased != nil && leased.GetVersion() == desc.Version { + return leased + } + } + return tabledesc.NewUnsafeImmutable(desc) +} + +// NewTypeResolver creates a new TypeResolver that is bound under the input +// transaction. It returns a nil resolver if the FlowCtx doesn't hold a +// descs.Collection object. +func (ctx *FlowCtx) NewTypeResolver(txn *kv.Txn) descs.DistSQLTypeResolver { + if ctx == nil || ctx.Descriptors == nil { + return descs.DistSQLTypeResolver{} + } + return descs.NewDistSQLTypeResolver(ctx.Descriptors, txn) +} + +// NewSemaContext creates a new SemaContext with a TypeResolver bound to the +// input transaction. +func (ctx *FlowCtx) NewSemaContext(txn *kv.Txn) *tree.SemaContext { + resolver := ctx.NewTypeResolver(txn) + semaCtx := tree.MakeSemaContext() + semaCtx.TypeResolver = &resolver + return &semaCtx +} + // ProcessorComponentID returns a ComponentID for the given processor in this // flow. func (ctx *FlowCtx) ProcessorComponentID(procID int32) execinfrapb.ComponentID { diff --git a/pkg/sql/execinfra/processorsbase.go b/pkg/sql/execinfra/processorsbase.go index 8e14670a40c3..f8453e8c2f2a 100644 --- a/pkg/sql/execinfra/processorsbase.go +++ b/pkg/sql/execinfra/processorsbase.go @@ -786,7 +786,7 @@ func (pb *ProcessorBase) InitWithEvalCtx( pb.MemMonitor = memMonitor // Hydrate all types used in the processor. - resolver := flowCtx.TypeResolverFactory.NewTypeResolver(evalCtx.Txn) + resolver := flowCtx.NewTypeResolver(evalCtx.Txn) if err := resolver.HydrateTypeSlice(evalCtx.Context, coreOutputTypes); err != nil { return err } diff --git a/pkg/sql/execinfra/server_config.go b/pkg/sql/execinfra/server_config.go index 9eeda739e7d6..1e405372998e 100644 --- a/pkg/sql/execinfra/server_config.go +++ b/pkg/sql/execinfra/server_config.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" @@ -145,6 +146,8 @@ type ServerConfig struct { // AdminVerifyProtectedTimestampRequest. 
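The FlowCtx helpers added above (TableDescriptor, NewTypeResolver, NewSemaContext) replace the removed TypeResolverFactory plumbing. Below is a minimal sketch, not taken from the patch, of how a processor's setup path might use them; the function name, its arguments, and the expression string are hypothetical, while the FlowCtx methods and their signatures come from this change.

```go
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/parser"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
)

// setupExample resolves a table descriptor and type-checks an expression
// using the descs.Collection carried by the FlowCtx.
func setupExample(
	ctx context.Context, flowCtx *execinfra.FlowCtx, tableProto *descpb.TableDescriptor,
) (catalog.TableDescriptor, tree.TypedExpr, error) {
	// TableDescriptor prefers a leased descriptor at the matching version and
	// falls back to an unsafe immutable wrapper around the proto.
	desc := flowCtx.TableDescriptor(tableProto)

	// NewSemaContext binds a DistSQLTypeResolver to the flow's txn, which is
	// what allows user-defined types referenced by expressions to resolve.
	semaCtx := flowCtx.NewSemaContext(flowCtx.Txn)
	expr, err := parser.ParseExpr("1 + 2")
	if err != nil {
		return nil, nil, err
	}
	typed, err := tree.TypeCheck(ctx, expr, semaCtx, types.Int)
	if err != nil {
		return nil, nil, err
	}
	return desc, typed, nil
}
```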
ProtectedTimestampProvider protectedts.Provider + DistSender *kvcoord.DistSender + // RangeCache is used by processors that were supposed to have been planned on // the leaseholders of the data ranges that they're consuming. These // processors query the cache to see if they should communicate updates to the diff --git a/pkg/sql/execinfra/testutils.go b/pkg/sql/execinfra/testutils.go index d83d09665320..84e29940c29c 100644 --- a/pkg/sql/execinfra/testutils.go +++ b/pkg/sql/execinfra/testutils.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/mon" ) @@ -128,7 +129,7 @@ func GenerateValuesSpec( spec.NumRows = uint64(len(rows)) if len(colTypes) != 0 { - var a rowenc.DatumAlloc + var a tree.DatumAlloc for i := 0; i < len(rows); i++ { var buf []byte for j, info := range spec.Columns { diff --git a/pkg/sql/execinfra/utils.go b/pkg/sql/execinfra/utils.go index 22f474553b35..4b53e5c73433 100644 --- a/pkg/sql/execinfra/utils.go +++ b/pkg/sql/execinfra/utils.go @@ -11,7 +11,7 @@ package execinfra import ( - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" @@ -20,8 +20,8 @@ import ( // DecodeDatum decodes the given bytes slice into a datum of the given type. It // returns an error if the decoding is not valid, or if there are any remaining // bytes. -func DecodeDatum(datumAlloc *rowenc.DatumAlloc, typ *types.T, data []byte) (tree.Datum, error) { - datum, rem, err := rowenc.DecodeTableValue(datumAlloc, typ, data) +func DecodeDatum(datumAlloc *tree.DatumAlloc, typ *types.T, data []byte) (tree.Datum, error) { + datum, rem, err := valueside.Decode(datumAlloc, typ, data) if err != nil { return nil, errors.NewAssertionErrorWithWrappedErrf(err, "error decoding %d bytes", errors.Safe(len(data))) diff --git a/pkg/sql/execinfra/version.go b/pkg/sql/execinfra/version.go index 774a182bf970..0dd33011b334 100644 --- a/pkg/sql/execinfra/version.go +++ b/pkg/sql/execinfra/version.go @@ -39,11 +39,11 @@ import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" // // ATTENTION: When updating these fields, add a brief description of what // changed to the version history below. -const Version execinfrapb.DistSQLVersion = 54 +const Version execinfrapb.DistSQLVersion = 55 // MinAcceptedVersion is the oldest version that the server is compatible with. // A server will not accept flows with older versions. -const MinAcceptedVersion execinfrapb.DistSQLVersion = 52 +const MinAcceptedVersion execinfrapb.DistSQLVersion = 55 /* @@ -51,6 +51,15 @@ const MinAcceptedVersion execinfrapb.DistSQLVersion = 52 Please add new entries at the top. +- Version: 56 (MinAcceptedVersion: 56) + - The Visibility fields from TableReaderSpec, IndexSkipTableReaderSpec, and + JoinReaderSpec have been removed. + +- Version: 55 (MinAcceptedVersion: 55) + - The computation of the hash of JSONs in the vectorized engine has changed. + As a result, the hash routing can now be done in a different manner, so we + have to bump both versions. + - Version: 54 (MinAcceptedVersion: 52) - Field NeededColumns has been removed from the TableReaderSpec. 
It was being used for the setup of the vectorized ColBatchScans, but now the diff --git a/pkg/sql/execinfrapb/BUILD.bazel b/pkg/sql/execinfrapb/BUILD.bazel index cede8e175c85..4b2eb47dfd7b 100644 --- a/pkg/sql/execinfrapb/BUILD.bazel +++ b/pkg/sql/execinfrapb/BUILD.bazel @@ -23,10 +23,10 @@ go_library( "//pkg/rpc", "//pkg/security", "//pkg/settings/cluster", - "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", + "//pkg/sql/catalog/descs", "//pkg/sql/catalog/tabledesc", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", @@ -97,6 +97,7 @@ proto_library( deps = [ "//pkg/jobs/jobspb:jobspb_proto", "//pkg/roachpb:roachpb_proto", + "//pkg/sql/catalog/catpb:catpb_proto", "//pkg/sql/catalog/descpb:descpb_proto", "//pkg/sql/inverted:inverted_proto", "//pkg/sql/pgwire/pgerror:pgerror_proto", @@ -121,6 +122,7 @@ go_proto_library( deps = [ "//pkg/jobs/jobspb", "//pkg/roachpb:with-mocks", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/inverted", "//pkg/sql/pgwire/pgerror", diff --git a/pkg/sql/execinfrapb/expr.go b/pkg/sql/execinfrapb/expr.go index 7df6ee4ce47e..47f52d55c9b9 100644 --- a/pkg/sql/execinfrapb/expr.go +++ b/pkg/sql/execinfrapb/expr.go @@ -13,6 +13,7 @@ package execinfrapb import ( "fmt" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" @@ -110,7 +111,7 @@ type ExprHelper struct { Types []*types.T Row rowenc.EncDatumRow - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc } func (eh *ExprHelper) String() string { @@ -182,6 +183,13 @@ func (eh *ExprHelper) Init( eh.Vars.Rebind(eh.Expr) return nil } + distResolver, ok := semaCtx.TypeResolver.(*descs.DistSQLTypeResolver) + if ok { + err := distResolver.HydrateTypeSlice(evalCtx.Context, types) + if err != nil { + return err + } + } var err error eh.Expr, err = DeserializeExpr(expr.Expr, semaCtx, evalCtx, &eh.Vars) return err diff --git a/pkg/sql/execinfrapb/flow_diagram.go b/pkg/sql/execinfrapb/flow_diagram.go index 27bc7974cea8..a79b45fa0aec 100644 --- a/pkg/sql/execinfrapb/flow_diagram.go +++ b/pkg/sql/execinfrapb/flow_diagram.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb" "github.com/cockroachdb/errors" "github.com/dustin/go-humanize" @@ -146,7 +147,7 @@ func (tr *TableReaderSpec) summary() (string, []string) { details := []string{indexDetail(&tr.Table, tr.IndexIdx)} if len(tr.Spans) > 0 { - tbl := tr.BuildTableDescriptor() + tbl := tabledesc.NewUnsafeImmutable(&tr.Table) // only show the first span idx := tbl.ActiveIndexes()[int(tr.IndexIdx)] valDirs := catalogkeys.IndexKeyValDirs(idx) diff --git a/pkg/sql/execinfrapb/processors.go b/pkg/sql/execinfrapb/processors.go index b99ca59984b0..62c2a499cba4 100644 --- a/pkg/sql/execinfrapb/processors.go +++ b/pkg/sql/execinfrapb/processors.go @@ -13,9 +13,7 @@ package execinfrapb import ( "strings" - "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" 
"github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -320,7 +318,7 @@ func (spec *WindowerSpec_Frame_Bounds) initFromAST( typ := dStartOffset.ResolvedType() spec.Start.OffsetType = DatumInfo{Encoding: descpb.DatumEncoding_VALUE, Type: typ} var buf []byte - var a rowenc.DatumAlloc + var a tree.DatumAlloc datum := rowenc.DatumToEncDatum(typ, dStartOffset) buf, err = datum.Encode(typ, &a, descpb.DatumEncoding_VALUE, buf) if err != nil { @@ -364,7 +362,7 @@ func (spec *WindowerSpec_Frame_Bounds) initFromAST( typ := dEndOffset.ResolvedType() spec.End.OffsetType = DatumInfo{Encoding: descpb.DatumEncoding_VALUE, Type: typ} var buf []byte - var a rowenc.DatumAlloc + var a tree.DatumAlloc datum := rowenc.DatumToEncDatum(typ, dEndOffset) buf, err = datum.Encode(typ, &a, descpb.DatumEncoding_VALUE, buf) if err != nil { @@ -502,42 +500,8 @@ func (spec *WindowerSpec_Frame) ConvertToAST() (*tree.WindowFrame, error) { }, nil } -// BuildTableDescriptor returns a catalog.TableDescriptor wrapping the -// underlying Table field. -func (spec *TableReaderSpec) BuildTableDescriptor() catalog.TableDescriptor { - return tabledesc.NewUnsafeImmutable(&spec.Table) -} - -// BuildTableDescriptor returns a catalog.TableDescriptor wrapping the -// underlying Table field. -func (spec *JoinReaderSpec) BuildTableDescriptor() catalog.TableDescriptor { - return tabledesc.NewUnsafeImmutable(&spec.Table) -} - -// BuildTableDescriptors returns a catalog.TableDescriptor slice wrapping the -// underlying Tables field. -func (spec *ZigzagJoinerSpec) BuildTableDescriptors() []catalog.TableDescriptor { - ret := make([]catalog.TableDescriptor, len(spec.Tables)) - for i := range spec.Tables { - ret[i] = tabledesc.NewUnsafeImmutable(&spec.Tables[i]) - } - return ret -} - -// BuildTableDescriptor returns a catalog.TableDescriptor wrapping the -// underlying Table field. -func (spec *InvertedJoinerSpec) BuildTableDescriptor() catalog.TableDescriptor { - return tabledesc.NewUnsafeImmutable(&spec.Table) -} - -// BuildTableDescriptor returns a catalog.TableDescriptor wrapping the -// underlying Table field. -func (spec *BackfillerSpec) BuildTableDescriptor() catalog.TableDescriptor { - return tabledesc.NewUnsafeImmutable(&spec.Table) -} - -// BuildTableDescriptor returns a catalog.TableDescriptor wrapping the -// underlying Table field. -func (spec *BulkRowWriterSpec) BuildTableDescriptor() catalog.TableDescriptor { - return tabledesc.NewUnsafeImmutable(&spec.Table) +// IsIndexJoin returns true if spec defines an index join (as opposed to a +// lookup join). +func (spec *JoinReaderSpec) IsIndexJoin() bool { + return len(spec.LookupColumns) == 0 && spec.LookupExpr.Empty() } diff --git a/pkg/sql/execinfrapb/processors_bulk_io.proto b/pkg/sql/execinfrapb/processors_bulk_io.proto index d5192aff59e2..a567d52525b8 100644 --- a/pkg/sql/execinfrapb/processors_bulk_io.proto +++ b/pkg/sql/execinfrapb/processors_bulk_io.proto @@ -21,6 +21,7 @@ option go_package = "execinfrapb"; import "jobs/jobspb/jobs.proto"; import "roachpb/io-formats.proto"; +import "sql/catalog/catpb/catalog.proto"; import "sql/catalog/descpb/structured.proto"; import "sql/execinfrapb/processors_base.proto"; import "util/hlc/timestamp.proto"; @@ -148,7 +149,7 @@ message ReadImportDataSpec { // field stores the databases' primary region. 
optional string database_primary_region = 17 [ (gogoproto.nullable) = false, - (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.RegionName" + (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.RegionName" ]; // NEXTID: 18 diff --git a/pkg/sql/execinfrapb/processors_sql.proto b/pkg/sql/execinfrapb/processors_sql.proto index 5c3042cfac6a..53a3b6708a23 100644 --- a/pkg/sql/execinfrapb/processors_sql.proto +++ b/pkg/sql/execinfrapb/processors_sql.proto @@ -45,23 +45,13 @@ message ValuesCoreSpec { repeated bytes raw_bytes = 2; } -// ScanVisibility controls which columns are seen by scans - just normal -// columns, or normal columns and also in-progress schema change columns. -enum ScanVisibility { - PUBLIC = 0; - PUBLIC_AND_NOT_PUBLIC = 1; -} - // TableReaderSpec is the specification for a "table reader". A table reader // performs KV operations to retrieve rows for a table and outputs the desired // columns of the rows that pass a filter expression. // // The "internal columns" of a TableReader (see ProcessorSpec) are all the -// columns of the table. If is_check is set, the TableReader will run additional -// data checking procedures and the "internal columns" are: -// - Error type (string). -// - Primary key as a string, if it was obtainable. -// - JSON of all decoded column values. +// columns of the table. Internally, only the values for the columns specified +// by needed_columns are to be populated. message TableReaderSpec { optional sqlbase.TableDescriptor table = 1 [(gogoproto.nullable) = false]; // If 0, we use the primary index. If non-zero, we use the index_idx-th index, @@ -80,14 +70,12 @@ message TableReaderSpec { optional int64 limit_hint = 5 [(gogoproto.nullable) = false]; // Indicates whether the TableReader is being run as an exhaustive - // check. This is only true during SCRUB commands. - optional bool is_check = 6 [(gogoproto.nullable) = false]; + // check. This is only true during SCRUB commands. No longer supported. + optional bool deprecated_is_check = 6 [(gogoproto.nullable) = false]; - // Indicates the visibility level of the columns that should be returned. - // Normally, will be set to PUBLIC. Will be set to PUBLIC_AND_NOT_PUBLIC if - // the consumer of this TableReader expects to be able to see in-progress - // schema changes. - optional ScanVisibility visibility = 7 [(gogoproto.nullable) = false]; + // This field used to be a visibility level of the columns that should be + // produced. We now always produce all columns (public and not public). + reserved 7; // This field used to be an upper bound for the number of rows we will read; // replaced by the parallelize field. @@ -177,11 +165,9 @@ message IndexSkipTableReaderSpec { reserved 3; repeated roachpb.Span spans = 8 [(gogoproto.nullable) = false]; - // Indicates the visibility level of the columns that should be returned. - // Normally, will be set to PUBLIC. Will be set to PUBLIC_AND_NOT_PUBLIC if - // the consumer of this TableReader expects to be able to see in-progress - // schema changes. - optional ScanVisibility visibility = 4 [(gogoproto.nullable) = false]; + // This field used to be a visibility level of the columns that should be + // produced. We now always produce all columns (public and not public). + reserved 4; optional bool reverse = 5 [(gogoproto.nullable) = false]; @@ -363,12 +349,9 @@ message JoinReaderSpec { // supported. 
optional sqlbase.JoinType type = 6 [(gogoproto.nullable) = false]; - // For index joins that are sources to mutation statements - what visibility - // of columns should we return? Mutations sometimes need to see in-progress - // schema change columns, in which case this field will be changed from its - // default PUBLIC state. Causes the index join to return these schema change - // columns. - optional ScanVisibility visibility = 7 [(gogoproto.nullable) = false]; + // This field used to be a visibility level of the columns that should be + // produced. We now always produce all columns (public and not public). + reserved 7; // Indicates the row-level locking strength to be used by the join. If set to // FOR_NONE, no row-level locking should be performed. diff --git a/pkg/sql/explain_ddl.go b/pkg/sql/explain_ddl.go index e81624539b97..2f7973a11175 100644 --- a/pkg/sql/explain_ddl.go +++ b/pkg/sql/explain_ddl.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -75,11 +74,11 @@ func (n *explainDDLNode) startExec(params runParams) error { } var vizURL string if n.options.Flags[tree.ExplainFlagDeps] { - if vizURL, err = scgraphviz.DependenciesURL(sc); err != nil { + if vizURL, err = sc.DependenciesURL(); err != nil { return errors.WithAssertionFailure(err) } } else { - if vizURL, err = scgraphviz.StagesURL(sc); err != nil { + if vizURL, err = sc.StagesURL(); err != nil { return errors.WithAssertionFailure(err) } } diff --git a/pkg/sql/explain_vec.go b/pkg/sql/explain_vec.go index 861357e32453..ad428c4d94fb 100644 --- a/pkg/sql/explain_vec.go +++ b/pkg/sql/explain_vec.go @@ -13,7 +13,6 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/colflow" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" @@ -96,9 +95,7 @@ func newFlowCtxForExplainPurposes(planCtx *PlanningCtx, p *planner) *execinfra.F VecFDSemaphore: p.execCfg.DistSQLSrv.VecFDSemaphore, NodeDialer: p.DistSQLPlanner().nodeDialer, }, - TypeResolverFactory: &descs.DistSQLTypeResolverFactory{ - Descriptors: p.Descriptors(), - }, + Descriptors: p.Descriptors(), DiskMonitor: &mon.BytesMonitor{}, } } diff --git a/pkg/sql/faketreeeval/evalctx.go b/pkg/sql/faketreeeval/evalctx.go index 1008e090a010..0048fb8904f1 100644 --- a/pkg/sql/faketreeeval/evalctx.go +++ b/pkg/sql/faketreeeval/evalctx.go @@ -85,7 +85,7 @@ func (so *DummySequenceOperators) HasPrivilege( ctx context.Context, specifier tree.HasPrivilegeSpecifier, user security.SQLUsername, - kind privilege.Kind, + priv privilege.Privilege, ) (bool, error) { return false, errors.WithStack(errEvalPlanner) } @@ -305,7 +305,7 @@ func (ep *DummyEvalPlanner) HasPrivilege( ctx context.Context, specifier tree.HasPrivilegeSpecifier, user security.SQLUsername, - kind privilege.Kind, + priv privilege.Privilege, ) (bool, error) { return false, errors.WithStack(errEvalPlanner) } diff --git a/pkg/sql/flowinfra/BUILD.bazel b/pkg/sql/flowinfra/BUILD.bazel index d60a42c9cde8..48c3842798eb 100644 --- a/pkg/sql/flowinfra/BUILD.bazel +++ b/pkg/sql/flowinfra/BUILD.bazel @@ -27,6 +27,7 @@ 
go_library( "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", + "//pkg/sql/sem/tree", "//pkg/sql/sqltelemetry", "//pkg/sql/types", "//pkg/util/admission", @@ -61,6 +62,7 @@ go_test( "utils_test.go", ], embed = [":flowinfra"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/gossip", diff --git a/pkg/sql/flowinfra/cluster_test.go b/pkg/sql/flowinfra/cluster_test.go index 0378acdb2bbc..3a801a946e8f 100644 --- a/pkg/sql/flowinfra/cluster_test.go +++ b/pkg/sql/flowinfra/cluster_test.go @@ -75,7 +75,7 @@ func TestClusterFlow(t *testing.T) { desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") makeIndexSpan := func(start, end int) roachpb.Span { var span roachpb.Span - prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, desc, desc.PublicNonPrimaryIndexes()[0].GetID())) + prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, desc.GetID(), desc.PublicNonPrimaryIndexes()[0].GetID())) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) @@ -796,7 +796,7 @@ func BenchmarkInfrastructure(b *testing.B) { if len(rows) != numNodes*numRows { b.Errorf("got %d rows, expected %d", len(rows), numNodes*numRows) } - var a rowenc.DatumAlloc + var a tree.DatumAlloc for i := range rows { if err := rows[i][0].EnsureDecoded(types.Int, &a); err != nil { b.Fatal(err) diff --git a/pkg/sql/flowinfra/flow.go b/pkg/sql/flowinfra/flow.go index 433c6fb5ced0..c8efd7654860 100644 --- a/pkg/sql/flowinfra/flow.go +++ b/pkg/sql/flowinfra/flow.go @@ -493,9 +493,9 @@ func (f *FlowBase) Cleanup(ctx context.Context) { panic("flow cleanup called twice") } - // Release any descriptors accessed by this flow - if f.TypeResolverFactory != nil { - f.TypeResolverFactory.CleanupFunc(ctx) + // Release any descriptors accessed by this flow. + if f.Descriptors != nil && f.IsDescriptorsCleanupRequired { + f.Descriptors.ReleaseAll(ctx) } sp := tracing.SpanFromContext(ctx) diff --git a/pkg/sql/flowinfra/stream_encoder.go b/pkg/sql/flowinfra/stream_encoder.go index 55aa6e248688..a1276827be48 100644 --- a/pkg/sql/flowinfra/stream_encoder.go +++ b/pkg/sql/flowinfra/stream_encoder.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" ) @@ -54,7 +55,7 @@ type StreamEncoder struct { // typingSent is set after the first message that contains any rows has been // sent. typingSent bool - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // Preallocated structures to avoid allocations. 
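Several call sites in these hunks switch from rowenc.DatumAlloc to the relocated tree.DatumAlloc (see the BenchmarkInfrastructure change above). As a hedged sketch of the usual pattern, with an invented helper name and the assumption that column 0 holds INT values, one allocator is shared across all decodes so the resulting datums are batch-allocated:

```go
package example

import (
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
)

// decodeFirstColumns decodes the first column of every row, reusing a single
// tree.DatumAlloc across the loop.
func decodeFirstColumns(rows []rowenc.EncDatumRow) ([]tree.Datum, error) {
	var alloc tree.DatumAlloc
	out := make([]tree.Datum, 0, len(rows))
	for i := range rows {
		// EnsureDecoded now takes a *tree.DatumAlloc, mirroring the change in
		// BenchmarkInfrastructure above.
		if err := rows[i][0].EnsureDecoded(types.Int, &alloc); err != nil {
			return nil, err
		}
		out = append(out, rows[i][0].Datum)
	}
	return out, nil
}
```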
msg execinfrapb.ProducerMessage diff --git a/pkg/sql/indexbackfiller_test.go b/pkg/sql/indexbackfiller_test.go index ae389610601a..e5347fb15785 100644 --- a/pkg/sql/indexbackfiller_test.go +++ b/pkg/sql/indexbackfiller_test.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -353,7 +352,7 @@ INSERT INTO foo VALUES (1), (10), (100); ) []tree.Datums { t.Helper() var fetcher row.Fetcher - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc mm := mon.MakeStandaloneBudget(1 << 30) idx, err := table.FindIndexWithID(indexID) @@ -386,7 +385,6 @@ INSERT INTO foo VALUES (1), (10), (100); descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, 0, - false, &alloc, mm.Monitor(), row.FetcherTableArgs{ diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index a5e27a82326a..b5cfac88aa66 100755 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -861,17 +862,17 @@ var ( refConstraintRuleCascade = tree.NewDString("CASCADE") ) -func dStringForFKAction(action descpb.ForeignKeyReference_Action) tree.Datum { +func dStringForFKAction(action catpb.ForeignKeyAction) tree.Datum { switch action { - case descpb.ForeignKeyReference_NO_ACTION: + case catpb.ForeignKeyAction_NO_ACTION: return refConstraintRuleNoAction - case descpb.ForeignKeyReference_RESTRICT: + case catpb.ForeignKeyAction_RESTRICT: return refConstraintRuleRestrict - case descpb.ForeignKeyReference_SET_NULL: + case catpb.ForeignKeyAction_SET_NULL: return refConstraintRuleSetNull - case descpb.ForeignKeyReference_SET_DEFAULT: + case catpb.ForeignKeyAction_SET_DEFAULT: return refConstraintRuleSetDefault - case descpb.ForeignKeyReference_CASCADE: + case catpb.ForeignKeyAction_CASCADE: return refConstraintRuleCascade } panic(errors.Errorf("unexpected ForeignKeyReference_Action: %v", action)) diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index 058b7adbb1de..037fd257c898 100644 --- a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -101,7 +101,7 @@ func (c *insertFastPathFKCheck) init(params runParams) error { c.idx = idx.idx codec := params.ExecCfg().Codec - c.keyPrefix = rowenc.MakeIndexKeyPrefix(codec, c.tabDesc, c.idx.GetID()) + c.keyPrefix = rowenc.MakeIndexKeyPrefix(codec, c.tabDesc.GetID(), c.idx.GetID()) c.spanBuilder = span.MakeBuilder(params.EvalContext(), codec, c.tabDesc, c.idx) if len(c.InsertCols) > idx.numLaxKeyCols { diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index 752d315a21ac..be3d23ffca6d 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -258,6 +258,7 @@ func TestQueryHasRoleOptionWithNoTxn(t *testing.T) { stmts := ` CREATE USER testuser VIEWACTIVITY; +CREATE USER testuserredacted VIEWACTIVITYREDACTED; CREATE 
USER testadmin; GRANT admin TO testadmin` if _, err := db.Exec(stmts); err != nil { @@ -274,6 +275,9 @@ GRANT admin TO testadmin` {"testuser", roleoption.VIEWACTIVITY.String(), true, ""}, {"testuser", roleoption.CREATEROLE.String(), false, ""}, {"testuser", "nonexistent", false, "unrecognized role option"}, + {"testuserredacted", roleoption.VIEWACTIVITYREDACTED.String(), true, ""}, + {"testuserredacted", roleoption.CREATEROLE.String(), false, ""}, + {"testuserredacted", "nonexistent", false, "unrecognized role option"}, {"testadmin", roleoption.VIEWACTIVITY.String(), true, ""}, {"testadmin", roleoption.CREATEROLE.String(), true, ""}, {"testadmin", "nonexistent", false, "unrecognized role option"}, diff --git a/pkg/sql/inverted/expression.go b/pkg/sql/inverted/expression.go index d45b09b16d53..e1c88496379d 100644 --- a/pkg/sql/inverted/expression.go +++ b/pkg/sql/inverted/expression.go @@ -27,7 +27,7 @@ import ( // // If the inverted column stores an encoded datum, the encoding is // DatumEncoding_ASCENDING_KEY, and is performed using -// EncodeTableKey(nil /* prefix */, val tree.Datum, encoding.Ascending). +// keyside.Encode(nil /* prefix */, val tree.Datum, encoding.Ascending). // It is used to represent spans of the inverted column. // // It would be ideal if the inverted column only contained Datums, since we diff --git a/pkg/sql/job_exec_context.go b/pkg/sql/job_exec_context.go index db7a162b8fa9..c6df2280ddf3 100644 --- a/pkg/sql/job_exec_context.go +++ b/pkg/sql/job_exec_context.go @@ -61,8 +61,8 @@ func (e *plannerJobExecContext) User() security.SQLUsername { return e.p.Us func (e *plannerJobExecContext) MigrationJobDeps() migration.JobDeps { return e.p.MigrationJobDeps() } -func (e *plannerJobExecContext) SpanConfigReconciliationJobDeps() spanconfig.ReconciliationDependencies { - return e.p.SpanConfigReconciliationJobDeps() +func (e *plannerJobExecContext) SpanConfigReconciler() spanconfig.Reconciler { + return e.p.SpanConfigReconciler() } // JobExecContext provides the execution environment for a job. It is what is @@ -84,5 +84,5 @@ type JobExecContext interface { LeaseMgr() *lease.Manager User() security.SQLUsername MigrationJobDeps() migration.JobDeps - SpanConfigReconciliationJobDeps() spanconfig.ReconciliationDependencies + SpanConfigReconciler() spanconfig.Reconciler } diff --git a/pkg/sql/job_exec_context_test_util.go b/pkg/sql/job_exec_context_test_util.go index d591e988f3be..ad7fa5f7bce9 100644 --- a/pkg/sql/job_exec_context_test_util.go +++ b/pkg/sql/job_exec_context_test_util.go @@ -70,7 +70,7 @@ func (p *FakeJobExecContext) MigrationJobDeps() migration.JobDeps { panic("unimplemented") } -// SpanConfigReconciliationJobDeps implements the JobExecContext interface. -func (p *FakeJobExecContext) SpanConfigReconciliationJobDeps() spanconfig.ReconciliationDependencies { +// SpanConfigReconciler implements the JobExecContext interface. 
+func (p *FakeJobExecContext) SpanConfigReconciler() spanconfig.Reconciler { panic("unimplemented") } diff --git a/pkg/sql/logictest/BUILD.bazel b/pkg/sql/logictest/BUILD.bazel index 4db5046c6ea3..4cf288169f86 100644 --- a/pkg/sql/logictest/BUILD.bazel +++ b/pkg/sql/logictest/BUILD.bazel @@ -48,6 +48,7 @@ go_library( "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", "@com_github_lib_pq//:pq", + "@com_github_pmezard_go_difflib//difflib", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index a81cd9edec06..d476259073cc 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -74,6 +74,7 @@ import ( "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" "github.com/lib/pq" + "github.com/pmezard/go-difflib/difflib" "github.com/stretchr/testify/require" ) @@ -430,6 +431,8 @@ var ( showSQL = flag.Bool("show-sql", false, "print the individual SQL statement/queries before processing", ) + + showDiff = flag.Bool("show-diff", false, "generate a diff for expectation mismatches when possible") printErrorSummary = flag.Bool("error-summary", false, "print a per-error summary of failing queries at the end of testing, "+ "when -allow-prepare-fail is set", @@ -2982,11 +2985,15 @@ func (t *logicTest) execQuery(query logicQuery) error { resultsMatch := func() error { makeError := func() error { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s: %s\nexpected:\n", query.pos, query.sql) + var expFormatted strings.Builder + var actFormatted strings.Builder for _, line := range query.expectedResultsRaw { - fmt.Fprintf(&buf, " %s\n", line) + fmt.Fprintf(&expFormatted, " %s\n", line) + } + for _, line := range t.formatValues(actualResultsRaw, query.valsPerLine) { + fmt.Fprintf(&actFormatted, " %s\n", line) } + sortMsg := "" if query.sorter != nil { // We performed an order-insensitive comparison of "actual" vs "expected" @@ -2995,11 +3002,23 @@ func (t *logicTest) execQuery(query logicQuery) error { // rows in the order in which the query returned them. 
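The logic.go change in this hunk adds a -show-diff flag that renders expectation mismatches with go-difflib; the GetUnifiedDiffString call appears just below. For reference, a self-contained, runnable sketch of that library call with made-up expected/actual result strings:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	expected := " 1 one\n 2 two\n"
	actual := " 1 one\n 2 TWO\n"
	// Render a unified diff of the two result sets with one line of context,
	// matching the shape of the logictest -show-diff output.
	diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
		A:        difflib.SplitLines(expected),
		B:        difflib.SplitLines(actual),
		FromFile: "Expected",
		ToFile:   "Actual",
		Context:  1,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(diff)
}
```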
sortMsg = " -> ignore the following ordering of rows" } - fmt.Fprintf(&buf, "but found (query options: %q%s) :\n", query.rawOpts, sortMsg) - for _, line := range t.formatValues(actualResultsRaw, query.valsPerLine) { - fmt.Fprintf(&buf, " %s\n", line) + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s: %s\nexpected:\n%s", query.pos, query.sql, expFormatted.String()) + fmt.Fprintf(&buf, "but found (query options: %q%s) :\n%s", query.rawOpts, sortMsg, actFormatted.String()) + if *showDiff { + if diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(expFormatted.String()), + B: difflib.SplitLines(actFormatted.String()), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }); err == nil { + fmt.Fprintf(&buf, "\nDiff:\n%s", diff) + } } - return errors.Newf("%s", buf.String()) + return errors.Newf("%s\n", buf.String()) } if len(query.expectedResults) != len(actualResults) { return makeError() diff --git a/pkg/sql/logictest/testdata/logic_test/alter_primary_key b/pkg/sql/logictest/testdata/logic_test/alter_primary_key index d8a3d45049f8..60356dc30007 100644 --- a/pkg/sql/logictest/testdata/logic_test/alter_primary_key +++ b/pkg/sql/logictest/testdata/logic_test/alter_primary_key @@ -218,8 +218,7 @@ t CREATE TABLE public.t ( UNIQUE INDEX i5 (w ASC) STORING (y), INVERTED INDEX i6 (v), INDEX i7 (z ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY fam_0_x_y_z_w_v (x, y, z, w, v), - CONSTRAINT check_crdb_internal_z_shard_4 CHECK (crdb_internal_z_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY fam_0_x_y_z_w_v (x, y, z, w, v) ) # Test that the indexes we expect got rewritten. All but i3 should have been rewritten, @@ -368,9 +367,7 @@ t CREATE TABLE public.t ( CONSTRAINT t_pkey PRIMARY KEY (y ASC) USING HASH WITH BUCKET_COUNT = 10, UNIQUE INDEX t_x_key (x ASC), INDEX i1 (z ASC) USING HASH WITH BUCKET_COUNT = 5, - FAMILY fam_0_x_y_z (x, y, z), - CONSTRAINT check_crdb_internal_z_shard_5 CHECK (crdb_internal_z_shard_5 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8)), - CONSTRAINT check_crdb_internal_y_shard_10 CHECK (crdb_internal_y_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY fam_0_x_y_z (x, y, z) ) query T @@ -429,8 +426,7 @@ t CREATE TABLE public.t ( CONSTRAINT t_pkey PRIMARY KEY (y ASC), UNIQUE INDEX t_x_key (x ASC) USING HASH WITH BUCKET_COUNT = 5, INDEX i (z ASC), - FAMILY fam_0_x_y_z (x, y, z), - CONSTRAINT check_crdb_internal_x_shard_5 CHECK (crdb_internal_x_shard_5 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8)) + FAMILY fam_0_x_y_z (x, y, z) ) query III @@ -556,8 +552,7 @@ t CREATE TABLE public.t ( rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), crdb_internal_x_shard_4 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(x)), 4:::INT8)) VIRTUAL, CONSTRAINT t_pkey PRIMARY KEY (x ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (x, rowid), - CONSTRAINT check_crdb_internal_x_shard_4 CHECK (crdb_internal_x_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (x, rowid) ) statement ok @@ -950,9 +945,7 @@ t CREATE TABLE public.t ( x INT8 NOT NULL, crdb_internal_x_shard_3 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(x)), 3:::INT8)) VIRTUAL, CONSTRAINT t_pkey PRIMARY KEY (x ASC) USING HASH WITH BUCKET_COUNT = 3, - FAMILY "primary" (x), - CONSTRAINT check_crdb_internal_x_shard_2 CHECK (crdb_internal_x_shard_2 IN (0:::INT8, 1:::INT8)), - CONSTRAINT 
check_crdb_internal_x_shard_3 CHECK (crdb_internal_x_shard_3 IN (0:::INT8, 1:::INT8, 2:::INT8)) + FAMILY "primary" (x) ) # Changes on a hash sharded index that change the columns will cause the old @@ -972,9 +965,7 @@ t CREATE TABLE public.t ( crdb_internal_y_shard_2 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(y)), 2:::INT8)) VIRTUAL, CONSTRAINT t_pkey PRIMARY KEY (y ASC) USING HASH WITH BUCKET_COUNT = 2, UNIQUE INDEX t_x_key (x ASC) USING HASH WITH BUCKET_COUNT = 2, - FAMILY fam_0_x_y (x, y), - CONSTRAINT check_crdb_internal_x_shard_2 CHECK (crdb_internal_x_shard_2 IN (0:::INT8, 1:::INT8)), - CONSTRAINT check_crdb_internal_y_shard_2 CHECK (crdb_internal_y_shard_2 IN (0:::INT8, 1:::INT8)) + FAMILY fam_0_x_y (x, y) ) # Regression for #49079. diff --git a/pkg/sql/logictest/testdata/logic_test/auto_span_config_reconciliation_job b/pkg/sql/logictest/testdata/logic_test/auto_span_config_reconciliation_job index da19b7e03bd3..81c6a5422519 100644 --- a/pkg/sql/logictest/testdata/logic_test/auto_span_config_reconciliation_job +++ b/pkg/sql/logictest/testdata/logic_test/auto_span_config_reconciliation_job @@ -1,7 +1,7 @@ # cluster-opt: enable-span-configs statement ok -SET CLUSTER SETTING spanconfig.experimental_reconciliation_job.enabled = true; +SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = true; # Ensure there's a single auto span config reconciliation job in the system, # and that it's running. diff --git a/pkg/sql/logictest/testdata/logic_test/cast b/pkg/sql/logictest/testdata/logic_test/cast index b48918ad5e34..184c2f459326 100644 --- a/pkg/sql/logictest/testdata/logic_test/cast +++ b/pkg/sql/logictest/testdata/logic_test/cast @@ -267,6 +267,7 @@ PREPARE insert_s AS INSERT INTO assn_cast(s) VALUES ($1) statement error expected EXECUTE parameter expression to have type string, but \'1\' has type int EXECUTE insert_s(1) + # Tests for assignment casts of DEFAULT expressions. subtest assignment_casts_default @@ -321,35 +322,6 @@ SELECT * FROM assn_cast_dec_default ---- 1 2 1.6 -# Tests for assignment casts of computed columns. -subtest assignment_casts_computed - -statement ok -CREATE TABLE assn_cast_comp ( - i INT, - i2 INT2 AS (i + 9999999) STORED, - t TEXT, - c CHAR AS (t) STORED, - d DECIMAL(10, 0), - d_comp DECIMAL(10, 2) AS (d) STORED, - d2 DECIMAL(10, 2), - d2_comp DECIMAL(10, 0) AS (d2) STORED -) - -statement error integer out of range for type int2 -INSERT INTO assn_cast_comp(i) VALUES (1) - -statement error value too long for type CHAR -INSERT INTO assn_cast_comp(t) VALUES ('foo') - -statement ok -INSERT INTO assn_cast_comp(d, d2) VALUES (1.56, 2.78) - -query RRRR -SELECT d, d_comp, d2, d2_comp FROM assn_cast_comp ----- -2 2.00 2.78 3 - # Tests for assignment casts in UPDATEs. subtest assignment_casts_update @@ -561,12 +533,420 @@ UPDATE assn_cast SET t = 3.2 statement error value type decimal doesn't match type timestamp of column "t" UPDATE assn_cast SET (i, t) = (1, 3.2) + +# Tests for assignment casts in UPSERTs. 
+subtest assignment_casts_upsert + +statement ok +CREATE TABLE assn_cast_upsert ( + k INT PRIMARY KEY, + c CHAR, + qc "char", + i2 INT2, + d DECIMAL(10, 0), + a DECIMAL(10, 0)[] +) + +statement error value too long for type CHAR +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, 'abc') + +statement ok +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, 'a') + +statement error value too long for type CHAR +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, 'def') + +statement error value too long for type CHAR +UPSERT INTO assn_cast_upsert (k, c) VALUES ('1', 'def') + +statement error value too long for type CHAR +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, 123) + +statement error value type string doesn't match type int of column \"k\" +UPSERT INTO assn_cast_upsert (k, c) VALUES ('1'::STRING, 'b') + +statement ok +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, 'b') + +statement ok +UPSERT INTO assn_cast_upsert (k, c) VALUES ('1', 'c') + +statement ok +UPSERT INTO assn_cast_upsert (k, c) VALUES (1, NULL) + +statement ok +PREPARE upsert_c AS UPSERT INTO assn_cast_upsert (k, c) VALUES ($1, $2) + +statement error value too long for type CHAR +EXECUTE upsert_c(1, 'foo') + +statement error value too long for type CHAR +EXECUTE upsert_c(2, 'foo') + +statement error value too long for type CHAR +EXECUTE upsert_c(1, 'foo'::STRING) + +statement ok +EXECUTE upsert_c(1, ' ') + +statement ok +EXECUTE upsert_c(2, ' ') + +query IT rowsort +SELECT k, concat('"', c, '"') FROM assn_cast_upsert +---- +1 "" +2 "" + +statement ok +EXECUTE upsert_c(1, ' '::STRING) + +statement ok +EXECUTE upsert_c(3, ' '::STRING) + +query IT rowsort +SELECT k, concat('"', c, '"') FROM assn_cast_upsert +---- +1 "" +2 "" +3 "" + +statement ok +DELETE FROM assn_cast_upsert + +statement ok +UPSERT INTO assn_cast_upsert (k, qc) VALUES (1, 'a') + +query T +UPSERT INTO assn_cast_upsert (k, qc) VALUES (1, 'abc') RETURNING qc +---- +a + +# An integer to "char" cast converts the integer into the corresponding 7-bit +# ASCII character. Anything greater than 127 is out of range. 
+statement error \"char\" out of range +UPSERT INTO assn_cast_upsert (k, qc) VALUES (1, 1234) + +statement ok +PREPARE upsert_qc AS UPSERT INTO assn_cast_upsert (k, qc) VALUES ($1, $2) + +statement ok +EXECUTE upsert_qc(1, 'foo') + +query T +SELECT qc FROM assn_cast_upsert +---- +f + +statement ok +EXECUTE upsert_qc(1, 'bar'::STRING) + +query T +SELECT qc FROM assn_cast_upsert +---- +b + +statement error integer out of range for type int2 +UPSERT INTO assn_cast_upsert (k, i2) VALUES (1, 999999999) + +statement ok +PREPARE upsert_i2 AS UPSERT INTO assn_cast_upsert (k, i2) VALUES ($1, $2) + +statement error integer out of range for type int2 +EXECUTE upsert_i2(1, 99999999) + +query F +UPSERT INTO assn_cast_upsert (k, d) VALUES (1, 11.22) RETURNING d +---- +11 + +query F +UPSERT INTO assn_cast_upsert (k, d) VALUES (1, 11.22::DECIMAL(10, 0)) RETURNING d +---- +11 + +query F +UPSERT INTO assn_cast_upsert (k, d) VALUES (1, 11.22::DECIMAL(10, 2)) RETURNING d +---- +11 + +statement ok +PREPARE upsert_d AS UPSERT INTO assn_cast_upsert (k, d) VALUES ($1, $2) + +statement ok +EXECUTE upsert_d(1, 123.45) + +query F +SELECT d FROM assn_cast_upsert +---- +123 + +statement ok +PREPARE upsert_d2 AS UPSERT INTO assn_cast_upsert (k, d) VALUES (1, (SELECT * FROM (VALUES ($1::DECIMAL(10, 2))))) + +statement ok +EXECUTE upsert_d2(67.89) + +query F +SELECT d FROM assn_cast_upsert +---- +68 + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[]) RETURNING a +---- +{} + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[NULL]) RETURNING a +---- +{NULL} + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[1.1]) RETURNING a +---- +{1} + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[2.88, NULL, 15]) RETURNING a +---- +{3,NULL,15} + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[3.99, NULL, 16]::DECIMAL(10, 2)[]) RETURNING a +---- +{4,NULL,16} + +query T +UPSERT INTO assn_cast_upsert (k, a) VALUES (1, ARRAY[5.55, 6.66::DECIMAL(10, 2)]) RETURNING a +---- +{6,7} + +statement ok +PREPARE upsert_a AS UPSERT INTO assn_cast_upsert (k, a) VALUES ($1, $2) + +statement ok +EXECUTE upsert_a(1, ARRAY[7.77, 8.88::DECIMAL(10, 2)]) + +query T +SELECT a FROM assn_cast_upsert +---- +{8,9} + +statement ok +PREPARE upsert_a2 AS UPSERT INTO assn_cast_upsert (k, a) VALUES ($1, ARRAY[$2]) + +statement ok +EXECUTE upsert_a2(1, 20.2) + +query T +SELECT a FROM assn_cast_upsert +---- +{20} + +statement ok +PREPARE upsert_a3 AS UPSERT INTO assn_cast_upsert (k, a) VALUES ($1, ARRAY[30.12, $2, 32.1]) + +statement ok +EXECUTE upsert_a3(1, 30.9) + +query T +SELECT a FROM assn_cast_upsert +---- +{30,31,32} + + +# Tests for assignment casts in INSERT .. ON CONFLICT .. DO NOTHING. +subtest assignment_casts_insert_do_nothing + +statement ok +CREATE TABLE assn_cast_do_nothing ( + k INT PRIMARY KEY, + d DECIMAL(10, 0) UNIQUE, + c CHAR UNIQUE +) + +statement error value too long for type CHAR +INSERT INTO assn_cast_do_nothing VALUES (1, 2.34, 'abc') ON CONFLICT DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 2.34, 'a') ON CONFLICT DO NOTHING + +# Conflict with k. +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 5.67, 'b') ON CONFLICT DO NOTHING + +# Conflict with d. +statement ok +INSERT INTO assn_cast_do_nothing VALUES (2, 2.34, 'b') ON CONFLICT DO NOTHING + +# Conflict with c. 
+statement ok +INSERT INTO assn_cast_do_nothing VALUES (2, 5.67, 'a') ON CONFLICT DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES ('1', 2.34, 'a') ON CONFLICT (k) DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 2.45, 'a') ON CONFLICT (d) DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 2.45::DECIMAL(10, 2), 'a') ON CONFLICT (d) DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 2.0, 'a') ON CONFLICT (d) DO NOTHING + +statement ok +INSERT INTO assn_cast_do_nothing VALUES (1, 2, 'a') ON CONFLICT (d) DO NOTHING + +query IRT +SELECT * FROM assn_cast_do_nothing +---- +1 2 a + +statement ok +PREPARE insert_do_nothing_d AS INSERT INTO assn_cast_do_nothing VALUES ($1, $2, $3) ON CONFLICT (d) DO NOTHING + +statement ok +EXECUTE insert_do_nothing_d(1, 2.45, 'a') + +statement ok +EXECUTE insert_do_nothing_d(1, 2.45::DECIMAL(10, 2), 'a') + +statement ok +EXECUTE insert_do_nothing_d(1, 2.0, 'a') + +statement ok +EXECUTE insert_do_nothing_d(1, 2, 'a') + +statement error duplicate key value violates unique constraint "assn_cast_do_nothing_pkey"\nDETAIL: Key \(k\)=\(1\) already exists\. +EXECUTE insert_do_nothing_d(1, 2.56, 'a') + +query IRT +SELECT * FROM assn_cast_do_nothing +---- +1 2 a + +statement ok +PREPARE insert_do_nothing_d2 AS INSERT INTO assn_cast_do_nothing VALUES ($1, $2::DECIMAL(10, 0), $3) ON CONFLICT (d) DO NOTHING + +statement ok +EXECUTE insert_do_nothing_d2(1, 2.45, 'a') + +statement ok +EXECUTE insert_do_nothing_d2(1, 2.45::DECIMAL(10, 2), 'a') + +statement ok +EXECUTE insert_do_nothing_d2(1, 2.0, 'a') + +statement ok +EXECUTE insert_do_nothing_d2(1, 2, 'a') + +statement error duplicate key value violates unique constraint "assn_cast_do_nothing_pkey"\nDETAIL: Key \(k\)=\(1\) already exists\. +EXECUTE insert_do_nothing_d2(1, 2.56, 'a') + +query IRT +SELECT * FROM assn_cast_do_nothing +---- +1 2 a + + +# Tests for assignment casts in INSERT .. ON CONFLICT .. DO UPDATE. +subtest assignment_casts_insert_do_update + +statement ok +CREATE TABLE assn_cast_do_update ( + k INT PRIMARY KEY, + d DECIMAL(10, 0) UNIQUE, + c CHAR UNIQUE +) + +statement error value too long for type CHAR +INSERT INTO assn_cast_do_update VALUES (1, 2.34, 'abc') ON CONFLICT (c) DO UPDATE SET c = 'b' + +statement ok +INSERT INTO assn_cast_do_update VALUES (1, 2.34, 'a') ON CONFLICT (c) DO UPDATE SET c = 'b' + +statement error value too long for type CHAR +INSERT INTO assn_cast_do_update VALUES (1, 2.34, 'a') ON CONFLICT (c) DO UPDATE SET c = 'abc' + +statement ok +INSERT INTO assn_cast_do_update VALUES (1, 2.34, 'a') ON CONFLICT (c) DO UPDATE SET c = 'b' + +query IRT +SELECT * FROM assn_cast_do_update +---- +1 2 b + +statement ok +PREPARE insert_do_update_c AS +INSERT INTO assn_cast_do_update VALUES (1, 2.34, $1) ON CONFLICT (c) DO UPDATE SET c = $2 + +statement error value too long for type CHAR +EXECUTE insert_do_update_c('b', 'abc') + +statement error value too long for type CHAR +EXECUTE insert_do_update_c('b', 'abc'::STRING) + +statement error duplicate key value violates unique constraint "assn_cast_do_update_pkey"\nDETAIL: Key \(k\)=\(1\) already exists\. 
+EXECUTE insert_do_update_c('c', 'abc') + +statement ok +EXECUTE insert_do_update_c('b', 'c') + +query IRT +SELECT * FROM assn_cast_do_update +---- +1 2 c + +query I +INSERT INTO assn_cast_do_update VALUES ('1', 2.34, 'a') +ON CONFLICT (k) DO UPDATE SET k = '2' +RETURNING k +---- +2 + +query R +INSERT INTO assn_cast_do_update VALUES (1, 2.45, 'a') +ON CONFLICT (d) DO UPDATE SET d = 3.56 +RETURNING d +---- +4 + +query R +INSERT INTO assn_cast_do_update VALUES (1, 3.56, 'a') +ON CONFLICT (d) DO UPDATE SET d = 5.12::DECIMAL(10, 2) +RETURNING d +---- +5 + +query IRT +SELECT * FROM assn_cast_do_update +---- +2 5 c + +statement ok +INSERT INTO assn_cast_do_update VALUES (3, 1.23, 'b') + +statement error duplicate key value violates unique constraint "assn_cast_do_update_c_key"\nDETAIL: Key \(c\)=\('c'\) already exists\. +INSERT INTO assn_cast_do_update VALUES (3, 10.12, 'b') +ON CONFLICT (c) DO UPDATE SET c = 'c' + +statement error duplicate key value violates unique constraint "assn_cast_do_update_d_key"\nDETAIL: Key \(d\)=\(5\) already exists\. +INSERT INTO assn_cast_do_update VALUES (3, 10.12, 'b') +ON CONFLICT (c) DO UPDATE SET d = 5.45 + +statement error duplicate key value violates unique constraint "assn_cast_do_update_d_key"\nDETAIL: Key \(d\)=\(5\) already exists\. +INSERT INTO assn_cast_do_update VALUES (3, 10.12, 'b') +ON CONFLICT (c) DO UPDATE SET d = 5.45::DECIMAL(10, 2) + + # Tests for assignment casts in cascading UPDATEs. subtest assignment_casts_update_cascade statement ok -CREATE TABLE assn_cast_p (p DECIMAL(10, 2) PRIMARY KEY); -INSERT INTO assn_cast_p VALUES (1.0); +CREATE TABLE assn_cast_p (p DECIMAL(10, 2) PRIMARY KEY, d DECIMAL(10, 2) UNIQUE); +INSERT INTO assn_cast_p VALUES (1.0, 10.0); # Test ON UPDATE CASCADE. statement ok @@ -584,6 +964,23 @@ SELECT * FROM assn_cast_c ---- 1 2 +statement ok +DROP TABLE assn_cast_c; +CREATE TABLE assn_cast_c (c INT PRIMARY KEY, d DECIMAL(10, 0) REFERENCES assn_cast_p(d) ON UPDATE CASCADE); +UPSERT INTO assn_cast_c VALUES (2, 10) + +statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" +UPSERT INTO assn_cast_p VALUES (2.0, 11.22) + +statement ok +UPSERT INTO assn_cast_p VALUES (2.0, 11.00) + +statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" +INSERT INTO assn_cast_p VALUES (2.0, 11.00) ON CONFLICT (d) DO UPDATE SET d = 12.99 + +statement ok +INSERT INTO assn_cast_p VALUES (2.0, 11.00) ON CONFLICT (d) DO UPDATE SET d = 12.0 + # Test ON UPDATE SET DEFAULT. 
statement ok DROP TABLE assn_cast_c; @@ -601,6 +998,142 @@ SELECT * FROM assn_cast_c ---- 2 3 +statement ok +DROP TABLE assn_cast_c; +CREATE TABLE assn_cast_c (c INT PRIMARY KEY, d DECIMAL(10, 0) DEFAULT 3.1 REFERENCES assn_cast_p(d) ON UPDATE SET DEFAULT); +INSERT INTO assn_cast_c VALUES (2, 12) + +statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" +UPSERT INTO assn_cast_p VALUES (3.0, 3.4) + +statement ok +UPSERT INTO assn_cast_p VALUES (3.0, 3.0) + +statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" +INSERT INTO assn_cast_p VALUES (3.0, 1) ON CONFLICT (p) DO UPDATE SET d = 3.4 + +statement error update on table "assn_cast_c" violates foreign key constraint "assn_cast_c_d_fkey" +INSERT INTO assn_cast_p VALUES (4.0, 3.0) ON CONFLICT (d) DO UPDATE SET d = 3.4 + +statement ok +INSERT INTO assn_cast_p VALUES (3.0, 1) ON CONFLICT (p) DO UPDATE SET d = 3.0 + +statement ok +INSERT INTO assn_cast_p VALUES (4.0, 3.0) ON CONFLICT (d) DO UPDATE SET d = 3.0 + + +# Tests for assignment casts of computed columns. +subtest assignment_casts_computed + +statement ok +CREATE TABLE assn_cast_comp ( + k INT PRIMARY KEY, + i INT, + i2 INT2 AS (i + 9999999) STORED, + t TEXT, + c CHAR AS (t) STORED, + d DECIMAL(10, 0), + d_comp DECIMAL(10, 2) AS (d) STORED, + d2 DECIMAL(10, 2), + d2_comp DECIMAL(10, 0) AS (d2) STORED +) + +statement error integer out of range for type int2 +INSERT INTO assn_cast_comp(k, i) VALUES (1, 1) + +statement error value too long for type CHAR +INSERT INTO assn_cast_comp(k, t) VALUES (1, 'foo') + +statement ok +INSERT INTO assn_cast_comp(k, d, d2) VALUES (1, 1.56, 2.78) + +query IRRRR +SELECT k, d, d_comp, d2, d2_comp FROM assn_cast_comp +---- +1 2 2.00 2.78 3 + +statement error integer out of range for type int2 +UPDATE assn_cast_comp SET i = 1 WHERE k = 1 + +statement error value too long for type CHAR +UPDATE assn_cast_comp SET t = 'foo' WHERE k = 1 + +statement ok +UPDATE assn_cast_comp SET d = 3.45, d2 = 4.56 WHERE k = 1 + +query IRRRR +SELECT k, d, d_comp, d2, d2_comp FROM assn_cast_comp +---- +1 3 3.00 4.56 5 + +statement error integer out of range for type int2 +UPSERT INTO assn_cast_comp (k, i) VALUES (1, 1) + +statement error integer out of range for type int2 +UPSERT INTO assn_cast_comp (k, i) VALUES (2, 2) + +statement error value too long for type CHAR +UPSERT INTO assn_cast_comp (k, t) VALUES (1, 'foo') + +statement error value too long for type CHAR +UPSERT INTO assn_cast_comp (k, t) VALUES (2, 'bar') + +statement ok +UPSERT INTO assn_cast_comp (k, d, d2) VALUES (1, 5.43, 7.89) + +query IRRRR +SELECT k, d, d_comp, d2, d2_comp FROM assn_cast_comp +---- +1 5 5.00 7.89 8 + + +# Tests for assignment casts of ON UPDATE expressions. 
+subtest assignment_casts_on_update + +statement ok +CREATE TABLE assn_cast_on_update ( + k INT PRIMARY KEY, + i INT UNIQUE, + d DECIMAL(10, 1) ON UPDATE 1.23, + d2 DECIMAL(10, 1) ON UPDATE 1.23::DECIMAL(10, 2), + d_comp DECIMAL(10, 0) AS (d) STORED +) + +statement ok +INSERT INTO assn_cast_on_update (k, i) VALUES (1, 10) + +statement ok +UPDATE assn_cast_on_update SET i = 11 WHERE k = 1 + +query IIRRR +SELECT * FROM assn_cast_on_update +---- +1 11 1.2 1.2 1 + +statement ok +UPDATE assn_cast_on_update SET d = NULL, d2 = NULL WHERE k = 1 + +statement ok +UPSERT INTO assn_cast_on_update (k, i) VALUES (1, 10) + +statement ok +UPSERT INTO assn_cast_on_update (k, i) VALUES (2, 20) + +query IIRRR rowsort +SELECT * FROM assn_cast_on_update +---- +1 10 1.2 1.2 1 +2 20 NULL NULL NULL + +statement ok +INSERT INTO assn_cast_on_update (k, i) VALUES (2, 20) ON CONFLICT (i) DO UPDATE SET i = 30 + +query IIRRR rowsort +SELECT * FROM assn_cast_on_update +---- +1 10 1.2 1.2 1 +2 30 1.2 1.2 1 + # Regression tests. subtest regressions @@ -777,3 +1310,35 @@ query T EXECUTE s73450_c('foo') ---- f + +# Regression test for #59489. Decimal scale should be enforced for literals +# given in scientific notation. +subtest regression_59489 + +statement ok +CREATE TABLE t59489 ( + d12_3 DECIMAL(12, 3), + d4_2 DECIMAL(4, 2) +) + +query R +INSERT INTO t59489 (d12_3) VALUES (6000) RETURNING d12_3 +---- +6000.000 + +query R +INSERT INTO t59489 (d12_3) VALUES (6e3) RETURNING d12_3 +---- +6000.000 + +query R +SELECT d12_3 FROM t59489 +---- +6000.000 +6000.000 + +statement error value with precision 4, scale 2 must round to an absolute value less than 10\^2 +INSERT INTO t59489 (d4_2) VALUES (600) + +statement error value with precision 4, scale 2 must round to an absolute value less than 10\^2 +INSERT INTO t59489 (d4_2) VALUES (6e2) diff --git a/pkg/sql/logictest/testdata/logic_test/cluster_settings b/pkg/sql/logictest/testdata/logic_test/cluster_settings index 8b90abd173bf..6b1cae3081d9 100644 --- a/pkg/sql/logictest/testdata/logic_test/cluster_settings +++ b/pkg/sql/logictest/testdata/logic_test/cluster_settings @@ -98,3 +98,21 @@ query B SHOW CLUSTER SETTING sql.defaults.stub_catalog_tables.enabled ---- true + +skipif config 3node-tenant +statement ok +SET CLUSTER SETTING kv.snapshot_rebalance.max_rate = '10Mib' + +skipif config 3node-tenant +query T +SHOW CLUSTER SETTING kv.snapshot_rebalance.max_rate +---- +10 MiB + +onlyif config 3node-tenant +statement error unknown cluster setting +SET CLUSTER SETTING kv.snapshot_rebalance.max_rate = '10Mib' + +onlyif config 3node-tenant +statement error unknown setting +SHOW CLUSTER SETTING kv.snapshot_rebalance.max_rate diff --git a/pkg/sql/logictest/testdata/logic_test/create_table b/pkg/sql/logictest/testdata/logic_test/create_table index be9cf56d1639..6d9201d2964f 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_table +++ b/pkg/sql/logictest/testdata/logic_test/create_table @@ -369,8 +369,7 @@ like_hash CREATE TABLE public.like_hash ( rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT like_hash_base_pkey PRIMARY KEY (rowid ASC), INDEX like_hash_base_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_enum b/pkg/sql/logictest/testdata/logic_test/distsql_enum index 072f961c6a0e..dc0159d8c91d 100644 --- 
a/pkg/sql/logictest/testdata/logic_test/distsql_enum +++ b/pkg/sql/logictest/testdata/logic_test/distsql_enum @@ -1,4 +1,4 @@ -# LogicTest: 5node-default-configs +# LogicTest: 5node-default-configs !5node-metadata # Regression test for nested tuple enum hydration (#74189) statement ok @@ -31,3 +31,61 @@ WITH w (col) ---- hello ("(hello,0)",0) +# Regression test for nested tuple enum hydration (#74189) +statement ok +CREATE TABLE t1 (x INT PRIMARY KEY, y greeting); INSERT INTO t1(x, y) VALUES (0,'hello'); +CREATE TABLE t2 (x INT PRIMARY KEY, y greeting); INSERT INTO t2(x, y) VALUES (0,'hello'); + +# split into ranges + +statement ok +ALTER TABLE t1 SPLIT AT VALUES(0),(10),(20); +ALTER TABLE t2 SPLIT AT VALUES(0),(10),(20); +ALTER TABLE t1 EXPERIMENTAL_RELOCATE VALUES (ARRAY[1], 0), (ARRAY[2], 10), (ARRAY[3], 20); +ALTER TABLE t2 EXPERIMENTAL_RELOCATE VALUES (ARRAY[1], 0), (ARRAY[2], 10), (ARRAY[3], 20); + +# Tickle stats to force lookup join +statement ok +ALTER TABLE t1 INJECT STATISTICS '[ + { + "columns": ["x"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 10, + "distinct_count": 100 + } +]' + +statement ok +ALTER TABLE t2 INJECT STATISTICS '[ + { + "columns": ["x"], + "created_at": "2018-01-01 1:00:00.00000+00:00", + "row_count": 10000, + "distinct_count": 10000 + } +]' + +query T nodeidx=1 +EXPLAIN (VEC) +SELECT x from t1 WHERE EXISTS (SELECT x FROM t2 WHERE t1.x=t2.x AND t2.y='hello') +---- +│ +├ Node 1 +│ └ *colrpc.Outbox +│ └ *rowexec.joinReader +│ └ *colfetcher.ColBatchScan +├ Node 2 +│ └ *colexec.ParallelUnorderedSynchronizer +│ ├ *colrpc.Inbox +│ ├ *rowexec.joinReader +│ │ └ *colfetcher.ColBatchScan +│ └ *colrpc.Inbox +└ Node 3 + └ *colrpc.Outbox + └ *rowexec.joinReader + └ *colfetcher.ColBatchScan + +query I nodeidx=1 +SELECT x from t1 WHERE EXISTS (SELECT x FROM t2 WHERE t1.x=t2.x AND t2.y='hello') +---- +0 diff --git a/pkg/sql/logictest/testdata/logic_test/drop_index b/pkg/sql/logictest/testdata/logic_test/drop_index index 34a5cdd22bdc..3d41d285f6af 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_index +++ b/pkg/sql/logictest/testdata/logic_test/drop_index @@ -327,7 +327,7 @@ DROP INDEX t_secondary CASCADE; ALTER TABLE t DROP COLUMN b; INSERT INTO t SELECT a + 1 FROM t; -statement error pgcode 23505 duplicate key value got decoding error: column-id "2" does not exist +statement error pgcode 23505 duplicate key value got decoding error: column \"b\" \(2\) is not public UPSERT INTO t SELECT a + 1 FROM t; statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/hash_sharded_index b/pkg/sql/logictest/testdata/logic_test/hash_sharded_index index 02ad7092853b..9c6343a80c39 100644 --- a/pkg/sql/logictest/testdata/logic_test/hash_sharded_index +++ b/pkg/sql/logictest/testdata/logic_test/hash_sharded_index @@ -12,8 +12,7 @@ sharded_primary CREATE TABLE public.sharded_primary ( crdb_internal_a_shard_10 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 10:::INT8)) VIRTUAL, a INT8 NOT NULL, CONSTRAINT sharded_primary_pkey PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a) ) statement error pgcode 22023 BUCKET_COUNT must be a 32-bit integer greater than 1, got -1 @@ -49,8 +48,7 @@ sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, crdb_internal_a_shard_10 INT4 NOT VISIBLE NOT NULL AS 
(mod(fnv32(crdb_internal.datums_to_bytes(a)), 10:::INT8)) VIRTUAL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a) ) query TTT colnames @@ -107,8 +105,7 @@ specific_family CREATE TABLE public.specific_family ( CONSTRAINT specific_family_pkey PRIMARY KEY (rowid ASC), INDEX specific_family_b_idx (b ASC) USING HASH WITH BUCKET_COUNT = 10, FAMILY a_family (a, rowid), - FAMILY b_family (b), - CONSTRAINT check_crdb_internal_b_shard_10 CHECK (crdb_internal_b_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY b_family (b) ) # Tests for secondary sharded indexes @@ -124,8 +121,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT sharded_secondary_pkey PRIMARY KEY (rowid ASC), INDEX sharded_secondary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -147,8 +143,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT sharded_secondary_pkey PRIMARY KEY (rowid ASC), INDEX sharded_secondary_crdb_internal_a_shard_4_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -177,8 +172,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( crdb_internal_a_shard_10 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 10:::INT8)) VIRTUAL, CONSTRAINT sharded_secondary_pkey PRIMARY KEY (rowid ASC), INDEX sharded_secondary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -199,9 +193,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( CONSTRAINT sharded_secondary_pkey PRIMARY KEY (rowid ASC), INDEX sharded_secondary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_a_idx1 (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a, rowid) ) # Drop a sharded index and ensure that the shard column is dropped with it. 
@@ -217,8 +209,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( crdb_internal_a_shard_4 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 4:::INT8)) VIRTUAL, CONSTRAINT sharded_secondary_pkey PRIMARY KEY (rowid ASC), INDEX sharded_secondary_a_idx1 (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -279,8 +270,7 @@ sharded_secondary CREATE TABLE public.sharded_secondary ( INDEX sharded_secondary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_a_idx1 (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_secondary_a_idx2 (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a, rowid) ) @@ -302,9 +292,7 @@ sharded_primary CREATE TABLE public.sharded_primary ( crdb_internal_a_shard_4 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 4:::INT8)) VIRTUAL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" (a), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)), - CONSTRAINT check_crdb_internal_a_shard_4 CHECK (crdb_internal_a_shard_4 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" (a) ) statement ok @@ -320,8 +308,7 @@ sharded_primary CREATE TABLE public.sharded_primary ( a INT8 NOT NULL, crdb_internal_a_shard_10 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 10:::INT8)) VIRTUAL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a) ) statement ok @@ -335,8 +322,7 @@ sharded_primary CREATE TABLE public.sharded_primary ( crdb_internal_a_shard_10 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 10:::INT8)) VIRTUAL, CONSTRAINT "primary" PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 10, INDEX sharded_primary_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 10, - FAMILY "primary" (a), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a) ) statement ok @@ -416,8 +402,7 @@ column_used_on_unsharded CREATE TABLE public.column_used_on_unsharded ( rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT column_used_on_unsharded_pkey PRIMARY KEY (rowid ASC), INDEX column_used_on_unsharded_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -442,8 +427,7 @@ column_used_on_unsharded_create_table CREATE TABLE public.column_used_on_unshar rowid 
INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT column_used_on_unsharded_create_table_pkey PRIMARY KEY (rowid ASC), INDEX column_used_on_unsharded_create_table_crdb_internal_a_shard_10_idx (crdb_internal_a_shard_10 ASC), - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_10 CHECK (crdb_internal_a_shard_10 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8)) + FAMILY "primary" (a, rowid) ) statement ok @@ -499,9 +483,7 @@ weird_names CREATE TABLE public.weird_names ( "crdb_internal_'quotes' in the column's name_shard_4" INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes("'quotes' in the column's name")), 4:::INT8)) VIRTUAL, CONSTRAINT weird_names_pkey PRIMARY KEY ("I am a column with spaces" ASC) USING HASH WITH BUCKET_COUNT = 12, INDEX foo ("'quotes' in the column's name" ASC) USING HASH WITH BUCKET_COUNT = 4, - FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name"), - CONSTRAINT "check_crdb_internal_I am a column with spaces_shard_12" CHECK ("crdb_internal_I am a column with spaces_shard_12" IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8, 8:::INT8, 9:::INT8, 10:::INT8, 11:::INT8)), - CONSTRAINT "check_crdb_internal_'quotes' in the column's name_shard_4" CHECK ("crdb_internal_'quotes' in the column's name_shard_4" IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8)) + FAMILY "primary" ("I am a column with spaces", "'quotes' in the column's name") ) subtest column_does_not_exist @@ -577,9 +559,7 @@ rename_column CREATE TABLE public.rename_column ( crdb_internal_c2_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(c2)), 8:::INT8)) VIRTUAL, CONSTRAINT rename_column_pkey PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, - FAMILY "primary" (c0, c1, c2), - CONSTRAINT check_crdb_internal_c0_c1_shard_8 CHECK (crdb_internal_c0_c1_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)), - CONSTRAINT check_crdb_internal_c2_shard_8 CHECK (crdb_internal_c2_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + FAMILY "primary" (c0, c1, c2) ) statement ok @@ -603,9 +583,7 @@ rename_column CREATE TABLE public.rename_column ( crdb_internal_c3_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(c3)), 8:::INT8)) VIRTUAL, CONSTRAINT rename_column_pkey PRIMARY KEY (c1 ASC, c2 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_c2_idx (c3 ASC) USING HASH WITH BUCKET_COUNT = 8, - FAMILY "primary" (c1, c2, c3), - CONSTRAINT check_crdb_internal_c0_c1_shard_8 CHECK (crdb_internal_c1_c2_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)), - CONSTRAINT check_crdb_internal_c2_shard_8 CHECK (crdb_internal_c3_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + FAMILY "primary" (c1, c2, c3) ) query III @@ -628,9 +606,7 @@ rename_column CREATE TABLE public.rename_column ( crdb_internal_c2_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(c2)), 8:::INT8)) VIRTUAL, CONSTRAINT rename_column_pkey PRIMARY KEY (c0 ASC, c1 ASC) USING HASH WITH BUCKET_COUNT = 8, INDEX rename_column_c2_idx (c2 ASC) USING HASH WITH BUCKET_COUNT = 8, - FAMILY "primary" (c0, c1, c2), - CONSTRAINT check_crdb_internal_c0_c1_shard_8 CHECK (crdb_internal_c0_c1_shard_8 IN 
(0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)), - CONSTRAINT check_crdb_internal_c2_shard_8 CHECK (crdb_internal_c2_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + FAMILY "primary" (c0, c1, c2) ) query III @@ -807,3 +783,117 @@ INSERT INTO parent VALUES (1,1) statement ok INSERT INTO child VALUES (1,1) + +# Test creating tables with the output of `SHOW CREATE TABLE` from a table with +# a hash-sharded index and make sure the constraint on the shard column is preserved +# and recognized by the optimizer +subtest create_with_show_create + +statement ok +DROP TABLE IF EXISTS t + +statement ok +CREATE TABLE t ( + a INT PRIMARY KEY USING HASH WITH BUCKET_COUNT = 8 +); + +query T +explain (opt, catalog) select * from t +---- +TABLE t + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32("crdb_internal.datums_to_bytes"(a)), 8:::INT8)) virtual [hidden] + ├── a int not null + ├── crdb_internal_mvcc_timestamp decimal [hidden] [system] + ├── tableoid oid [hidden] [system] + ├── CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + └── PRIMARY INDEX t_pkey + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32("crdb_internal.datums_to_bytes"(a)), 8:::INT8)) virtual [hidden] + └── a int not null + scan t + ├── check constraint expressions + │ └── crdb_internal_a_shard_8 IN (0, 1, 2, 3, 4, 5, 6, 7) + └── computed column expressions + └── crdb_internal_a_shard_8 + └── mod(fnv32(crdb_internal.datums_to_bytes(a)), 8) + +let $create_statement +SELECT create_statement FROM [SHOW CREATE TABLE t] + +statement ok +DROP TABLE t + +statement ok +$create_statement + +query T +SELECT @2 FROM [SHOW CREATE TABLE t] +---- +CREATE TABLE public.t ( + crdb_internal_a_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) VIRTUAL, + a INT8 NOT NULL, + CONSTRAINT t_pkey PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 8, + FAMILY "primary" (a) +) + +query T +explain (opt, catalog) select * from t +---- +TABLE t + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) virtual [hidden] + ├── a int not null + ├── crdb_internal_mvcc_timestamp decimal [hidden] [system] + ├── tableoid oid [hidden] [system] + ├── CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + └── PRIMARY INDEX t_pkey + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) virtual [hidden] + └── a int not null + scan t + ├── check constraint expressions + │ └── crdb_internal_a_shard_8 IN (0, 1, 2, 3, 4, 5, 6, 7) + └── computed column expressions + └── crdb_internal_a_shard_8 + └── mod(fnv32(crdb_internal.datums_to_bytes(a)), 8) + +# Make sure a user-defined constraint is used if it's equivalent to the shard +# column constraint that would have been created.
+statement ok +DROP TABLE t + +statement ok +CREATE TABLE public.t ( + crdb_internal_a_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) VIRTUAL, + a INT8 NOT NULL, + CONSTRAINT t_pkey PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 8, + FAMILY "primary" (a), + CONSTRAINT check_crdb_internal_a_shard_8 CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) +) + +query T +SELECT @2 FROM [SHOW CREATE TABLE t] +---- +CREATE TABLE public.t ( + crdb_internal_a_shard_8 INT4 NOT VISIBLE NOT NULL AS (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) VIRTUAL, + a INT8 NOT NULL, + CONSTRAINT t_pkey PRIMARY KEY (a ASC) USING HASH WITH BUCKET_COUNT = 8, + FAMILY "primary" (a), + CONSTRAINT check_crdb_internal_a_shard_8 CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) +) + +query T +explain (opt, catalog) select * from t +---- +TABLE t + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) virtual [hidden] + ├── a int not null + ├── crdb_internal_mvcc_timestamp decimal [hidden] [system] + ├── tableoid oid [hidden] [system] + ├── CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + └── PRIMARY INDEX t_pkey + ├── crdb_internal_a_shard_8 int4 not null as (mod(fnv32(crdb_internal.datums_to_bytes(a)), 8:::INT8)) virtual [hidden] + └── a int not null + scan t + ├── check constraint expressions + │ └── crdb_internal_a_shard_8 IN (0, 1, 2, 3, 4, 5, 6, 7) + └── computed column expressions + └── crdb_internal_a_shard_8 + └── mod(fnv32(crdb_internal.datums_to_bytes(a)), 8) diff --git a/pkg/sql/logictest/testdata/logic_test/information_schema b/pkg/sql/logictest/testdata/logic_test/information_schema index c45290311aeb..c2414ad13aca 100644 --- a/pkg/sql/logictest/testdata/logic_test/information_schema +++ b/pkg/sql/logictest/testdata/logic_test/information_schema @@ -4636,6 +4636,7 @@ default_transaction_isolation serializable default_transaction_priority normal default_transaction_read_only off default_transaction_use_follower_reads off +default_with_oids off disable_partially_distributed_plans off disable_plan_gists off disallow_full_table_scans off @@ -4721,6 +4722,7 @@ transaction_rows_read_log 0 transaction_rows_written_err 0 transaction_rows_written_log 0 transaction_status NoTxn +xmloption content # information_schema can be used with the anonymous database. # It should show information across all databases. diff --git a/pkg/sql/logictest/testdata/logic_test/insert b/pkg/sql/logictest/testdata/logic_test/insert index bdcd76dcfa8b..2cc19ea9cf61 100644 --- a/pkg/sql/logictest/testdata/logic_test/insert +++ b/pkg/sql/logictest/testdata/logic_test/insert @@ -788,3 +788,27 @@ SELECT * FROM gen_as_id_seqopt ORDER BY a 8 5 7 9 8 11 10 11 2 + +# Regression test for hitting an internal error in the vectorized unordered +# distinct when NULLs are present in the rows being inserted (#74795). 
+statement ok +CREATE TABLE t74795 ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + account_id TEXT NOT NULL, + deletion_request_id TEXT, + UNIQUE INDEX (account_id, deletion_request_id) +); +INSERT INTO t74795 + (account_id) +VALUES + ('foo'), + ('foo'), + ('foo') +ON CONFLICT (account_id, deletion_request_id) DO NOTHING; + +query TT +SELECT account_id, deletion_request_id FROM t74795 +---- +foo NULL +foo NULL +foo NULL diff --git a/pkg/sql/logictest/testdata/logic_test/new_schema_changer b/pkg/sql/logictest/testdata/logic_test/new_schema_changer index 733a6dffbf2e..968e976b32d6 100644 --- a/pkg/sql/logictest/testdata/logic_test/new_schema_changer +++ b/pkg/sql/logictest/testdata/logic_test/new_schema_changer @@ -391,7 +391,7 @@ CREATE VIEW v4Dep AS (SELECT n2, n1 FROM v2Dep); statement ok explain (DDL, DEPS) DROP VIEW v1Dep CASCADE; -statement error pq: cannot drop view "v1dep" because view "v2dep" depends on it +statement error pq: cannot drop relation "v1dep" because view "v2dep" depends on it DROP VIEW v1Dep RESTRICT; statement error pq: "v1dep" is not a materialized view @@ -453,6 +453,15 @@ statement ok CREATE SEQUENCE defaultdb.sq1 OWNED BY defaultdb.shipments.carrier; statement ok +CREATE TABLE defaultdb.sq1dep ( + rand_col INT8 DEFAULT nextval('defaultdb.sq1') +); + +statement error cannot drop table a because other objects depend on it +DROP TABLE defaultdb.shipments; + +statement ok +DROP TABLE defaultdb.sq1dep; DROP TABLE defaultdb.shipments; statement ok @@ -466,11 +475,10 @@ CREATE TABLE defaultdb.shipments ( CONSTRAINT fk_orders FOREIGN KEY (customer_id) REFERENCES defaultdb.orders(customer) ); - statement ok CREATE VIEW defaultdb.v1 as (select customer_id, carrier from defaultdb.shipments); -statement error pq: cannot drop table "defaultdb.public.shipments" because view "defaultdb.public.v1" depends on it +statement error pq: cannot drop relation "shipments" because view "v1" depends on it DROP TABLE defaultdb.shipments; statement ok @@ -633,16 +641,38 @@ CREATE VIEW db1.sc1.v3 AS (SELECT name, n1 FROM db1.sc1.v1, db1.sc1.v2); statement ok CREATE VIEW db1.sc1.v4 AS (SELECT n2, n1 FROM db1.sc1.v2); +statement ok +CREATE INDEX tmp_idx ON db1.sc1.t1(name) + +statement ok +use db1; +COMMENT ON DATABASE db1 IS 'BLAH'; +COMMENT ON SCHEMA sc1 IS 'BLAH2'; +COMMENT ON TABLE db1.sc1.t1 IS 'BLAH3'; +COMMENT ON COLUMN db1.sc1.t1.id IS 'BLAH4'; +COMMENT ON INDEX db1.sc1.tmp_idx IS 'BLAH5'; +use test; + statement ok CREATE TYPE db1.sc1.typ AS ENUM('a') statement ok CREATE VIEW db1.sc1.v5 AS (SELECT 'a'::db1.sc1.typ::string AS k, n2, n1 from db1.sc1.v4) +# Confirm comments exist. +query T +SELECT comment FROM system.comments ORDER BY comment ASC +---- +BLAH +BLAH2 +BLAH3 +BLAH4 +BLAH5 + statement error schema "sc1" is not empty and CASCADE was not specified DROP SCHEMA db1.sc1 -statement error database "db1" has a non-empty schema "public" and CASCADE was not specified +statement error database "db1" is not empty and RESTRICT was specified DROP DATABASE db1 RESTRICT statement ok @@ -684,6 +714,11 @@ select count(*) from system.descriptor statement ok DROP DATABASE db1 CASCADE +# No comments should be left after. 
+query T +SELECT comment FROM system.comments ORDER BY comment ASC +---- + let $desc_count_post_drop select count(*) from system.descriptor @@ -982,7 +1017,7 @@ query IT SELECT "reportingID", info::JSONB - 'Timestamp' - 'DescriptorID' FROM system.eventlog; ---- -1 {"CascadeDroppedViews": ["test.public.v4ev"], "EventType": "drop_view", "Statement": "DROP VIEW v1ev CASCADE", "User": "root", "ViewName": "test.public.v1ev"} +1 {"CascadeDroppedViews": ["test.public.v4ev"], "EventType": "drop_view", "Statement": "DROP VIEW test.public.v1ev CASCADE", "Tag": "DROP VIEW", "User": "root", "ViewName": "test.public.v1ev"} statement ok CREATE VIEW v1ev AS (SELECT name FROM T1EV); @@ -1001,8 +1036,8 @@ SELECT "reportingID", info::JSONB - 'Timestamp' - 'DescriptorID' FROM system.eventlog ORDER BY timestamp, info DESC; ---- -1 {"CascadeDroppedViews": ["test.public.v2ev", "test.public.v3ev"], "EventType": "drop_table", "Statement": "DROP TABLE t1ev, t2ev CASCADE", "TableName": "test.public.t2ev", "User": "root"} -1 {"CascadeDroppedViews": ["test.public.v1ev", "test.public.v4ev"], "EventType": "drop_table", "Statement": "DROP TABLE t1ev, t2ev CASCADE", "TableName": "test.public.t1ev", "User": "root"} +1 {"CascadeDroppedViews": ["test.public.v2ev", "test.public.v3ev"], "EventType": "drop_table", "Statement": "DROP TABLE test.public.t1ev, test.public.t2ev CASCADE", "TableName": "test.public.t2ev", "Tag": "DROP TABLE", "User": "root"} +1 {"CascadeDroppedViews": ["test.public.v1ev", "test.public.v4ev"], "EventType": "drop_table", "Statement": "DROP TABLE test.public.t1ev, test.public.t2ev CASCADE", "TableName": "test.public.t1ev", "Tag": "DROP TABLE", "User": "root"} statement ok CREATE TABLE fooev (i INT PRIMARY KEY); @@ -1018,7 +1053,7 @@ SELECT "reportingID", info::JSONB - 'Timestamp' - 'DescriptorID' FROM system.eventlog ORDER BY timestamp, info DESC; ---- -1 {"EventType": "alter_table", "MutationID": 1, "Statement": "ALTER TABLE fooev ADD COLUMN j INT8", "TableName": "test.public.fooev", "User": "root"} +1 {"EventType": "alter_table", "MutationID": 1, "Statement": "ALTER TABLE test.public.fooev ADD COLUMN j INT8", "TableName": "test.public.fooev", "Tag": "ALTER TABLE", "User": "root"} subtest names-with-escaped-chars @@ -1065,8 +1100,8 @@ SELECT "reportingID", info::JSONB - 'Timestamp' - 'DescriptorID' FROM system.eventlog ORDER BY timestamp, info DESC; ---- -1 {"DatabaseName": "'db1-a'", "DroppedSchemaObjects": ["'db1-a'.public", "'db1-a'.sc1", "'db1-a'.sc2"], "EventType": "drop_database", "Statement": "DROP DATABASE \"'db1-a'\" CASCADE", "User": "root"} -1 {"DatabaseName": "db2", "DroppedSchemaObjects": ["db2.public", "db2.sc3"], "EventType": "drop_database", "Statement": "DROP DATABASE db2 CASCADE", "User": "root"} +1 {"DatabaseName": "'db1-a'", "DroppedSchemaObjects": ["'db1-a'.public", "'db1-a'.sc1", "'db1-a'.sc2"], "EventType": "drop_database", "Statement": "DROP DATABASE \"'db1-a'\" CASCADE", "Tag": "DROP DATABASE", "User": "root"} +1 {"DatabaseName": "db2", "DroppedSchemaObjects": ["db2.public", "db2.sc3"], "EventType": "drop_database", "Statement": "DROP DATABASE db2 CASCADE", "Tag": "DROP DATABASE", "User": "root"} # Sanity: Dropping multiple objects in the builder or resolving any dependencies # should function fine. 
@@ -1241,3 +1276,31 @@ DROP TABLE ttc3; statement ok DROP TYPE d_tc; + +subtest empty-database + +statement error empty database name +DROP DATABASE "" + +subtest schema-permission-error + +user root + +statement ok +CREATE SCHEMA sc1; + +statement ok +CREATE DATABASE db1; + +user testuser + +statement ok +SET experimental_use_new_schema_changer = 'on' + +statement error must be owner of schema \"sc1\" +DROP SCHEMA sc1; + +statement error user testuser does not have DROP privilege on database db1 +DROP DATABASE db1; + +user root diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog index 0f42b1d8339b..0152eb2fb01c 100644 --- a/pkg/sql/logictest/testdata/logic_test/pg_catalog +++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog @@ -4043,6 +4043,7 @@ default_transaction_isolation serializable NULL default_transaction_priority normal NULL NULL NULL string default_transaction_read_only off NULL NULL NULL string default_transaction_use_follower_reads off NULL NULL NULL string +default_with_oids off NULL NULL NULL string disable_partially_distributed_plans off NULL NULL NULL string disable_plan_gists off NULL NULL NULL string disallow_full_table_scans off NULL NULL NULL string @@ -4125,6 +4126,7 @@ transaction_rows_written_err 0 NULL transaction_rows_written_log 0 NULL NULL NULL string transaction_status NoTxn NULL NULL NULL string vectorize on NULL NULL NULL string +xmloption content NULL NULL NULL string skipif config 3node-tenant query TTTTTTT colnames @@ -4151,6 +4153,7 @@ default_transaction_isolation serializable NULL default_transaction_priority normal NULL user NULL normal normal default_transaction_read_only off NULL user NULL off off default_transaction_use_follower_reads off NULL user NULL off off +default_with_oids off NULL user NULL off off disable_partially_distributed_plans off NULL user NULL off off disable_plan_gists off NULL user NULL off off disallow_full_table_scans off NULL user NULL off off @@ -4233,6 +4236,7 @@ transaction_rows_written_err 0 NULL transaction_rows_written_log 0 NULL user NULL 0 0 transaction_status NoTxn NULL user NULL NoTxn NoTxn vectorize on NULL user NULL on on +xmloption content NULL user NULL content content query TTTTTT colnames SELECT name, source, min_val, max_val, sourcefile, sourceline FROM pg_catalog.pg_settings @@ -4254,6 +4258,7 @@ default_transaction_isolation NULL NULL NULL default_transaction_priority NULL NULL NULL NULL NULL default_transaction_read_only NULL NULL NULL NULL NULL default_transaction_use_follower_reads NULL NULL NULL NULL NULL +default_with_oids NULL NULL NULL NULL NULL disable_partially_distributed_plans NULL NULL NULL NULL NULL disable_plan_gists NULL NULL NULL NULL NULL disallow_full_table_scans NULL NULL NULL NULL NULL @@ -4338,6 +4343,7 @@ transaction_rows_written_err NULL NULL NULL transaction_rows_written_log NULL NULL NULL NULL NULL transaction_status NULL NULL NULL NULL NULL vectorize NULL NULL NULL NULL NULL +xmloption NULL NULL NULL NULL NULL # pg_catalog.pg_sequence diff --git a/pkg/sql/logictest/testdata/logic_test/privilege_builtins b/pkg/sql/logictest/testdata/logic_test/privilege_builtins index 47f3abeb73b9..51c635131a02 100644 --- a/pkg/sql/logictest/testdata/logic_test/privilege_builtins +++ b/pkg/sql/logictest/testdata/logic_test/privilege_builtins @@ -362,7 +362,7 @@ SELECT has_database_privilege((SELECT oid FROM pg_database WHERE datname = 'test ---- true true true true -query error pgcode 3D000 database 'does_not_exist' does not exist +query 
error pgcode 3D000 database "does_not_exist" does not exist SELECT has_database_privilege('does_not_exist', 'CREATE') query BBBBB diff --git a/pkg/sql/logictest/testdata/logic_test/role b/pkg/sql/logictest/testdata/logic_test/role index 2dbac4103b59..414128261628 100644 --- a/pkg/sql/logictest/testdata/logic_test/role +++ b/pkg/sql/logictest/testdata/logic_test/role @@ -1458,4 +1458,21 @@ hash6 $2a$10$ false # Reset cluster setting after test completion. statement ok -SET CLUSTER SETTING server.user_login.store_client_pre_hashed_passwords.enabled = true +RESET CLUSTER SETTING server.user_login.store_client_pre_hashed_passwords.enabled; + +subtest bcrypt_cost + +statement ok +SET CLUSTER SETTING server.user_login.password_hashes.default_cost.crdb_bcrypt = 20 + +statement ok +CREATE USER hash7 WITH PASSWORD 'hello' + +# Check that the configured cost was embedded in the password hash. +query TT +SELECT username, substr("hashedPassword", 1, 7) FROM system.users WHERE username = 'hash7' +---- +hash7 $2a$20$ + +statement ok +RESET CLUSTER SETTING server.user_login.password_hashes.default_cost.crdb_bcrypt; diff --git a/pkg/sql/logictest/testdata/logic_test/schema b/pkg/sql/logictest/testdata/logic_test/schema index 2fcd1ff0bcc9..043af0cf1188 100644 --- a/pkg/sql/logictest/testdata/logic_test/schema +++ b/pkg/sql/logictest/testdata/logic_test/schema @@ -423,7 +423,7 @@ CREATE TYPE privs.denied AS ENUM ('denied') statement error pq: must be owner of schema "privs" ALTER SCHEMA privs RENAME TO denied -statement error pq: permission denied to drop schema "privs" +statement error must be owner of schema \"privs\" DROP SCHEMA privs # Test the usage privilege. diff --git a/pkg/sql/logictest/testdata/logic_test/scrub b/pkg/sql/logictest/testdata/logic_test/scrub index a100e6da1008..addac1956a08 100644 --- a/pkg/sql/logictest/testdata/logic_test/scrub +++ b/pkg/sql/logictest/testdata/logic_test/scrub @@ -22,15 +22,14 @@ query TTTTTTTT EXPERIMENTAL SCRUB TABLE t ----- -query TTTTTTTT +statement error not implemented EXPERIMENTAL SCRUB TABLE t WITH OPTIONS PHYSICAL ------ query TTTTTTTT EXPERIMENTAL SCRUB TABLE t WITH OPTIONS INDEX ALL ------ -query TTTTTTTT +statement error not implemented EXPERIMENTAL SCRUB TABLE t WITH OPTIONS PHYSICAL, INDEX (name_idx) ----- @@ -145,7 +144,7 @@ CREATE TABLE test.order (a INT, b INT, c INT, CONSTRAINT "primary" PRIMARY KEY ( statement ok INSERT INTO test.order VALUES (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0); -query TTTTTTTT +statement error not implemented EXPERIMENTAL SCRUB TABLE test.order WITH OPTIONS PHYSICAL # Test that scrubbing timestamp works as expected.
diff --git a/pkg/sql/logictest/testdata/logic_test/set b/pkg/sql/logictest/testdata/logic_test/set index dd3ae70f66f0..10127e775764 100644 --- a/pkg/sql/logictest/testdata/logic_test/set +++ b/pkg/sql/logictest/testdata/logic_test/set @@ -530,6 +530,42 @@ SET standard_conforming_strings='true' statement ok SET standard_conforming_strings='on' +subtest default_with_oids_test + +query T +SHOW default_with_oids +---- +off + +statement ok +SET default_with_oids = 'false' + +query T +SHOW default_with_oids +---- +off + +statement error invalid value for parameter "default_with_oids": "true" +SET default_with_oids = 'true' + +subtest xmloption_test + +query T +SHOW xmloption +---- +content + +statement ok +SET xmloption = 'content' + +query T +SHOW xmloption +---- +content + +statement error invalid value for parameter "xmloption": "document" +SET xmloption = 'document' + subtest backslash_quote_test statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source index ea3d95711a15..f38f14ee0072 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -41,6 +41,7 @@ default_transaction_isolation serializable default_transaction_priority normal default_transaction_read_only off default_transaction_use_follower_reads off +default_with_oids off disable_partially_distributed_plans off disable_plan_gists off disallow_full_table_scans off @@ -123,6 +124,7 @@ transaction_rows_written_err 0 transaction_rows_written_log 0 transaction_status NoTxn vectorize on +xmloption content query T colnames SELECT * FROM [SHOW CLUSTER SETTING sql.defaults.distsql] diff --git a/pkg/sql/logictest/testdata/logic_test/upsert b/pkg/sql/logictest/testdata/logic_test/upsert index 421226e635cb..cbb48a5f57b4 100644 --- a/pkg/sql/logictest/testdata/logic_test/upsert +++ b/pkg/sql/logictest/testdata/logic_test/upsert @@ -1327,3 +1327,28 @@ SELECT * FROM generated_as_id_t ORDER BY a 1 5 1 2 2 2 3 3 3 + +subtest explicit_arbiter_indexes + +# Test explicitly specified arbiter indexes. Note that these are mostly tested +# by optbuilder test cases, since they compile to the same outputs as inferred +# arbiter indexes. +statement ok +CREATE TABLE arbiter_index (a INT, b INT, c INT, PRIMARY KEY (a, b), UNIQUE (c)) + +statement ok +INSERT INTO arbiter_index VALUES (1,2,3) +ON CONFLICT ON CONSTRAINT arbiter_index_pkey DO NOTHING + +statement ok +INSERT INTO arbiter_index VALUES(1,2,3) +ON CONFLICT ON CONSTRAINT arbiter_index_pkey DO NOTHING + +statement ok +INSERT INTO arbiter_index VALUES(2,2,3) +ON CONFLICT ON CONSTRAINT arbiter_index_c_key DO UPDATE SET c=10 + +query III +SELECT * FROM arbiter_index +---- +1 2 10 diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config b/pkg/sql/logictest/testdata/logic_test/zone_config index 537b7ff066fe..0597c8d9a6dd 100644 --- a/pkg/sql/logictest/testdata/logic_test/zone_config +++ b/pkg/sql/logictest/testdata/logic_test/zone_config @@ -1,7 +1,7 @@ # As these tests are run for both the system tenant and secondary tenants, we # turn on the setting that gates setting zone configs for system tenants. statement ok -SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = true +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = true # Check that we can alter the default zone config.
statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant b/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant index af67994e811e..0669a2429f55 100644 --- a/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant +++ b/pkg/sql/logictest/testdata/logic_test/zone_config_system_tenant @@ -9,7 +9,7 @@ ALTER TABLE t CONFIGURE ZONE USING num_replicas = 5; # Should have no effect on the system tenant. statement ok -SET CLUSTER SETTING sql.zone_configs.experimental_allow_for_secondary_tenant.enabled = false +SET CLUSTER SETTING sql.zone_configs.allow_for_secondary_tenant.enabled = false statement ok ALTER TABLE t CONFIGURE ZONE USING num_replicas = 3; diff --git a/pkg/sql/memsize/BUILD.bazel b/pkg/sql/memsize/BUILD.bazel index d118ef52214e..74b1ef54fc79 100644 --- a/pkg/sql/memsize/BUILD.bazel +++ b/pkg/sql/memsize/BUILD.bazel @@ -8,6 +8,6 @@ go_library( deps = [ "//pkg/sql/sem/tree", "//pkg/util/duration", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", ], ) diff --git a/pkg/sql/memsize/constants.go b/pkg/sql/memsize/constants.go index 109bd43ea9ed..e7e70dcdc7ab 100644 --- a/pkg/sql/memsize/constants.go +++ b/pkg/sql/memsize/constants.go @@ -14,7 +14,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/duration" ) diff --git a/pkg/sql/opt/constraint/constraint.go b/pkg/sql/opt/constraint/constraint.go index e57fe8a371f8..02aaa3b18386 100644 --- a/pkg/sql/opt/constraint/constraint.go +++ b/pkg/sql/opt/constraint/constraint.go @@ -303,21 +303,37 @@ func (c *Constraint) Contains(evalCtx *tree.EvalContext, other *Constraint) bool // span that contains it). func (c *Constraint) ContainsSpan(evalCtx *tree.EvalContext, sp *Span) bool { keyCtx := MakeKeyContext(&c.Columns, evalCtx) - // Binary search to find an overlapping span. + if cSpan, ok := c.findIntersectingSpan(&keyCtx, sp); ok { + // The spans must overlap. Check if sp is fully contained. + return sp.CompareStarts(&keyCtx, cSpan) >= 0 && + sp.CompareEnds(&keyCtx, cSpan) <= 0 + } + return false +} + +// IntersectsSpan returns true if the constraint overlaps the given span. +func (c *Constraint) IntersectsSpan(evalCtx *tree.EvalContext, sp *Span) bool { + keyCtx := MakeKeyContext(&c.Columns, evalCtx) + _, ok := c.findIntersectingSpan(&keyCtx, sp) + return ok +} + +// findIntersectingSpan performs binary search to find a span within +// the constraint that overlaps sp. +func (c *Constraint) findIntersectingSpan(keyCtx *KeyContext, sp *Span) (_ *Span, ok bool) { for l, r := 0, c.Spans.Count()-1; l <= r; { m := (l + r) / 2 cSpan := c.Spans.Get(m) - if sp.StartsAfter(&keyCtx, cSpan) { + if sp.StartsAfter(keyCtx, cSpan) { l = m + 1 - } else if cSpan.StartsAfter(&keyCtx, sp) { + } else if cSpan.StartsAfter(keyCtx, sp) { r = m - 1 } else { - // The spans must overlap. Check if sp is fully contained. - return sp.CompareStarts(&keyCtx, cSpan) >= 0 && - sp.CompareEnds(&keyCtx, cSpan) <= 0 + // The spans must overlap. 
+ return cSpan, true } } - return false + return nil, false } // Combine refines the receiver constraint using constraints on a suffix of the diff --git a/pkg/sql/opt/constraint/constraint_test.go b/pkg/sql/opt/constraint/constraint_test.go index 0ad04850c800..30f2a6dbf312 100644 --- a/pkg/sql/opt/constraint/constraint_test.go +++ b/pkg/sql/opt/constraint/constraint_test.go @@ -356,6 +356,55 @@ func TestConstraintContainsSpan(t *testing.T) { } } +func TestConstraintIntersectsSpan(t *testing.T) { + st := cluster.MakeTestingClusterSettings() + evalCtx := tree.MakeTestingEvalContext(st) + + // Each test case has a bunch of spans that are expected to intersect the + // constraint, and a bunch of spans that are expected not to. + testData := []struct { + constraint string + intersectingSpans string + notIntersectingSpans string + }{ + { + constraint: "/1: [/1 - /3]", + intersectingSpans: "[/0 - /1] (/0 - /2] [/2 - /2] (/0 - /3) [/1 - /1] (/1 - /2) [/2 - /3] [/1 - /3] [/2 - /5]", + notIntersectingSpans: "[/0 - /1) (/3 - /5) [/7 - /7] [/7 - /10]", + }, + { + constraint: "/1/2: [ - /2] [/4 - /4] [/5/3 - /7) [/9 - /9/20]", + intersectingSpans: "[ - /1] [ - /2] [ - /3] [/1 - /2] [/2 - /2] [/3 - /4] [/4 - /4] " + + "[/5/3 - /5/3/1] [/5/3 - /7] [/6 - /8] [/5/5 - /7) [/9/19 - /9/20] [/9/20 - /9/21] [/8 - /9]", + notIntersectingSpans: "[/3 - /3] (/2 - /4) [/5 - /5/3) (/9/20 - /10]", + }, + { + constraint: "/1/-2: [/1/5 - /1/2] [/3/5 - /5/2] [/7 - ]", + intersectingSpans: "[/1/5 - /1/2] [/1/4 - /1/3] [/1/4 - /1/2] [/4 - /5) [/4 - /5/1] [/4/6 - /5/3] [/6/10 - ]", + notIntersectingSpans: "[ - /1/6] (/1/2 - /3/5) [/6 - /7) (/6/3 - /6/9]", + }, + } + + for i, tc := range testData { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + c := ParseConstraint(&evalCtx, tc.constraint) + + spans := parseSpans(&evalCtx, tc.intersectingSpans) + for i := 0; i < spans.Count(); i++ { + if sp := spans.Get(i); !c.IntersectsSpan(&evalCtx, sp) { + t.Errorf("%s should intersect span %s", c, sp) + } + } + spans = parseSpans(&evalCtx, tc.notIntersectingSpans) + for i := 0; i < spans.Count(); i++ { + if sp := spans.Get(i); c.IntersectsSpan(&evalCtx, sp) { + t.Errorf("%s should not intersect span %s", c, sp) + } + } + }) + } +} + func TestConstraintCombine(t *testing.T) { st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) diff --git a/pkg/sql/opt/distribution/BUILD.bazel b/pkg/sql/opt/distribution/BUILD.bazel new file mode 100644 index 000000000000..d5a77fa96102 --- /dev/null +++ b/pkg/sql/opt/distribution/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "distribution", + srcs = ["distribution.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/opt/distribution", + visibility = ["//visibility:public"], + deps = [ + "//pkg/sql/opt/memo", + "//pkg/sql/opt/props/physical", + "//pkg/sql/sem/tree", + "//pkg/util/buildutil", + "@com_github_cockroachdb_errors//:errors", + ], +) + +go_test( + name = "distribution_test", + srcs = ["distribution_test.go"], + embed = [":distribution"], + deps = [ + "//pkg/settings/cluster", + "//pkg/sql/opt/memo", + "//pkg/sql/opt/norm", + "//pkg/sql/opt/props", + "//pkg/sql/opt/props/physical", + "//pkg/sql/opt/testutils/testcat", + "//pkg/sql/opt/testutils/testexpr", + "//pkg/sql/sem/tree", + ], +) diff --git a/pkg/sql/opt/distribution/distribution.go b/pkg/sql/opt/distribution/distribution.go new file mode 100644 index 000000000000..2c0014867d41 --- /dev/null +++ 
b/pkg/sql/opt/distribution/distribution.go @@ -0,0 +1,144 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package distribution + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" + "github.com/cockroachdb/errors" +) + +// CanProvide returns true if the given operator returns rows that can +// satisfy the given required distribution. +func CanProvide( + evalCtx *tree.EvalContext, expr memo.RelExpr, required *physical.Distribution, +) bool { + if required.Any() { + return true + } + if buildutil.CrdbTestBuild { + checkRequired(required) + } + + var provided physical.Distribution + switch t := expr.(type) { + case *memo.DistributeExpr: + return true + + case *memo.LocalityOptimizedSearchExpr: + provided.FromLocality(evalCtx.Locality) + + case *memo.ScanExpr: + md := expr.Memo().Metadata() + index := md.Table(t.Table).Index(t.Index) + provided.FromIndexScan(evalCtx, index, t.Constraint) + + default: + // Other operators can pass through the distribution to their children. + } + + return provided.Any() || provided.Equals(*required) +} + +// BuildChildRequired returns the distribution that must be required of its +// given child in order to satisfy a required distribution. Can only be called if +// CanProvide is true for the required distribution. +func BuildChildRequired( + parent memo.RelExpr, required *physical.Distribution, childIdx int, +) physical.Distribution { + if required.Any() { + return physical.Distribution{} + } + + switch parent.(type) { + case *memo.DistributeExpr: + return physical.Distribution{} + + case *memo.LocalityOptimizedSearchExpr: + return physical.Distribution{} + + case *memo.ScanExpr: + return physical.Distribution{} + } + + if buildutil.CrdbTestBuild { + checkRequired(required) + } + return *required +} + +// BuildProvided returns a specific distribution that the operator provides. The +// returned distribution must match the required distribution. +// +// This function assumes that the provided distributions have already been set in +// the children of the expression. +func BuildProvided( + evalCtx *tree.EvalContext, expr memo.RelExpr, required *physical.Distribution, +) physical.Distribution { + var provided physical.Distribution + switch t := expr.(type) { + case *memo.DistributeExpr: + return *required + + case *memo.LocalityOptimizedSearchExpr: + provided.FromLocality(evalCtx.Locality) + + case *memo.ScanExpr: + md := expr.Memo().Metadata() + index := md.Table(t.Table).Index(t.Index) + provided.FromIndexScan(evalCtx, index, t.Constraint) + + default: + for i, n := 0, expr.ChildCount(); i < n; i++ { + if relExpr, ok := expr.Child(i).(memo.RelExpr); ok { + provided = provided.Union(relExpr.ProvidedPhysical().Distribution) + } + } + } + + if buildutil.CrdbTestBuild { + checkProvided(&provided, required) + } + + return provided +} + +func checkRequired(required *physical.Distribution) { + // There should be exactly one region in the required distribution (for now, + // assuming this is coming from the gateway). 
+ if len(required.Regions) != 1 { + panic(errors.AssertionFailedf( + "There should be at most one region in the required distribution: %s", required.String(), + )) + } + check(required) +} + +func checkProvided(provided, required *physical.Distribution) { + if !provided.Any() && !required.Any() && !provided.Equals(*required) { + panic(errors.AssertionFailedf("expression can't provide required distribution")) + } + check(provided) +} + +func check(distribution *physical.Distribution) { + for i := range distribution.Regions { + if i > 0 { + if distribution.Regions[i] <= distribution.Regions[i-1] { + panic(errors.AssertionFailedf( + "Distribution regions are not sorted and deduplicated: %s", distribution.String(), + )) + } + } + } +} diff --git a/pkg/sql/opt/distribution/distribution_test.go b/pkg/sql/opt/distribution/distribution_test.go new file mode 100644 index 000000000000..3ea9f4e4ccdd --- /dev/null +++ b/pkg/sql/opt/distribution/distribution_test.go @@ -0,0 +1,108 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package distribution + +import ( + "fmt" + "testing" + + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" + "github.com/cockroachdb/cockroach/pkg/sql/opt/props" + "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" + "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testexpr" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" +) + +func TestBuildProvided(t *testing.T) { + tc := testcat.New() + st := cluster.MakeTestingClusterSettings() + evalCtx := tree.NewTestingEvalContext(st) + var f norm.Factory + f.Init(evalCtx, tc) + + testCases := []struct { + leftDist []string + rightDist []string + expected []string + }{ + { + leftDist: []string{}, + rightDist: []string{}, + expected: []string{}, + }, + { + leftDist: []string{}, + expected: []string{}, + }, + { + leftDist: []string{}, + rightDist: []string{"west"}, + expected: []string{"west"}, + }, + { + leftDist: []string{"east", "west"}, + expected: []string{"east", "west"}, + }, + { + leftDist: []string{"east"}, + rightDist: []string{"east"}, + expected: []string{"east"}, + }, + { + leftDist: []string{"west"}, + rightDist: []string{"east"}, + expected: []string{"east", "west"}, + }, + { + leftDist: []string{"central", "east", "west"}, + rightDist: []string{"central", "west"}, + expected: []string{"central", "east", "west"}, + }, + } + for tcIdx, tc := range testCases { + t.Run(fmt.Sprintf("case%d", tcIdx+1), func(t *testing.T) { + expected := physical.Distribution{Regions: tc.expected} + + leftInput := &testexpr.Instance{ + Rel: &props.Relational{}, + Provided: &physical.Provided{ + Distribution: physical.Distribution{Regions: tc.leftDist}, + }, + } + + // If there is only one input, build provided distribution for a Select. + // Otherwise, build the distribution for a join (we use anti join to avoid + // calling initJoinMultiplicity). 
+ var expr memo.RelExpr + if tc.rightDist == nil { + expr = f.Memo().MemoizeSelect(leftInput, memo.FiltersExpr{}) + } else { + rightInput := &testexpr.Instance{ + Rel: &props.Relational{}, + Provided: &physical.Provided{ + Distribution: physical.Distribution{Regions: tc.rightDist}, + }, + } + expr = f.Memo().MemoizeAntiJoin( + leftInput, rightInput, memo.FiltersExpr{}, &memo.JoinPrivate{}, + ) + } + + res := BuildProvided(evalCtx, expr, &physical.Distribution{}) + if res.String() != expected.String() { + t.Errorf("expected '%s', got '%s'", expected, res) + } + }) + } +} diff --git a/pkg/sql/opt/exec/execbuilder/relational.go b/pkg/sql/opt/exec/execbuilder/relational.go index 80a96d805814..efe540b34111 100644 --- a/pkg/sql/opt/exec/execbuilder/relational.go +++ b/pkg/sql/opt/exec/execbuilder/relational.go @@ -236,6 +236,9 @@ func (b *Builder) buildRelational(e memo.RelExpr) (execPlan, error) { case *memo.SortExpr: ep, err = b.buildSort(t) + case *memo.DistributeExpr: + ep, err = b.buildDistribute(t) + case *memo.IndexJoinExpr: ep, err = b.buildIndexJoin(t) @@ -1654,6 +1657,23 @@ func (b *Builder) buildSort(sort *memo.SortExpr) (execPlan, error) { return execPlan{root: node, outputCols: input.outputCols}, nil } +func (b *Builder) buildDistribute(distribute *memo.DistributeExpr) (execPlan, error) { + input, err := b.buildRelational(distribute.Input) + if err != nil { + return execPlan{}, err + } + + distribution := distribute.ProvidedPhysical().Distribution + inputDistribution := distribute.Input.ProvidedPhysical().Distribution + if distribution.Equals(inputDistribution) { + return execPlan{}, errors.AssertionFailedf("distribution already provided by input") + } + + // TODO(rytaft): This is currently a no-op. We should pass this distribution + // info to the DistSQL planner. 
+ return input, err +} + func (b *Builder) buildOrdinality(ord *memo.OrdinalityExpr) (execPlan, error) { input, err := b.buildRelational(ord.Input) if err != nil { diff --git a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg index de3b2f684746..0e8b8f86cd89 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg +++ b/pkg/sql/opt/exec/execbuilder/testdata/distsql_agg @@ -993,6 +993,7 @@ group-by (streaming) ├── cost: 25.1456179 ├── key: (2) ├── fd: (2)-->(5) + ├── distribution: test ├── prune: (5) ├── scan data2 │ ├── columns: a:1 b:2 @@ -1002,6 +1003,7 @@ group-by (streaming) │ ├── key: (2) │ ├── fd: ()-->(1) │ ├── ordering: +2 opt(1) [actual: +2] + │ ├── distribution: test │ ├── prune: (2) │ └── interesting orderings: (+2 opt(1)) └── aggregations diff --git a/pkg/sql/opt/exec/execbuilder/testdata/enums b/pkg/sql/opt/exec/execbuilder/testdata/enums index a99942fde5f7..5f9c75e64876 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/enums +++ b/pkg/sql/opt/exec/execbuilder/testdata/enums @@ -23,6 +23,7 @@ scan t ├── cost: 18.05 ├── key: (1) ├── fd: (1)-->(2) + ├── distribution: test └── prune: (1,2) query T @@ -123,11 +124,13 @@ distinct-on ├── stats: [rows=4, distinct(1)=4, null(1)=0] ├── cost: 1114.88 ├── key: (1) + ├── distribution: test └── scan checks@checks_x_y_idx ├── columns: x:1 ├── stats: [rows=1000, distinct(1)=4, null(1)=0] ├── cost: 1104.82 ├── ordering: +1 + ├── distribution: test ├── prune: (1) └── interesting orderings: (+1) @@ -172,24 +175,28 @@ union ├── stats: [rows=5, distinct(11)=5, null(11)=1] ├── cost: 2276.86667 ├── key: (11) + ├── distribution: test ├── interesting orderings: (+11) ├── project │ ├── columns: nulls.x:1 │ ├── stats: [rows=333.333333, distinct(1)=5, null(1)=3.33333333] │ ├── cost: 1138.40333 │ ├── ordering: +1 + │ ├── distribution: test │ ├── interesting orderings: (+1) │ └── select │ ├── columns: nulls.x:1 y:2 │ ├── stats: [rows=333.333333, distinct(1)=5, null(1)=3.33333333, distinct(2)=33.3333333, null(2)=0] │ ├── cost: 1135.05 │ ├── ordering: +1 + │ ├── distribution: test │ ├── interesting orderings: (+1,+2) │ ├── scan nulls@nulls_x_y_idx │ │ ├── columns: nulls.x:1 y:2 │ │ ├── stats: [rows=1000, distinct(1)=5, null(1)=10, distinct(2)=100, null(2)=10] │ │ ├── cost: 1125.02 │ │ ├── ordering: +1 + │ │ ├── distribution: test │ │ ├── prune: (1,2) │ │ └── interesting orderings: (+1,+2) │ └── filters @@ -199,18 +206,21 @@ union ├── stats: [rows=333.333333, distinct(6)=5, null(6)=3.33333333] ├── cost: 1138.40333 ├── ordering: +6 + ├── distribution: test ├── interesting orderings: (+6) └── select ├── columns: nulls.x:6 y:7 ├── stats: [rows=333.333333, distinct(6)=5, null(6)=3.33333333, distinct(7)=33.3333333, null(7)=0] ├── cost: 1135.05 ├── ordering: +6 + ├── distribution: test ├── interesting orderings: (+6,+7) ├── scan nulls@nulls_x_y_idx │ ├── columns: nulls.x:6 y:7 │ ├── stats: [rows=1000, distinct(6)=5, null(6)=10, distinct(7)=100, null(7)=10] │ ├── cost: 1125.02 │ ├── ordering: +6 + │ ├── distribution: test │ ├── prune: (6,7) │ └── interesting orderings: (+6,+7) └── filters diff --git a/pkg/sql/opt/exec/execbuilder/testdata/explain b/pkg/sql/opt/exec/execbuilder/testdata/explain index f325b3500bd6..23333728a84b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/explain +++ b/pkg/sql/opt/exec/execbuilder/testdata/explain @@ -1277,6 +1277,7 @@ values ├── cost: 0.02 ├── key: () ├── fd: ()-->(1) + ├── distribution: test ├── prune: (1) └── (1,) @@ -1290,6 +1291,7 @@ values ├── cost: 0.02 ├── key: 
() ├── fd: ()-->(1) + ├── distribution: test ├── prune: (1) └── tuple [type=tuple{int}] └── const: 1 [type=int] @@ -1396,11 +1398,13 @@ inner-join (hash) ├── stats: [rows=990, distinct(1)=99, null(1)=0, distinct(6)=99, null(6)=0] ├── cost: 2269.90625 ├── fd: (6)-->(7), (1)==(6), (6)==(1) + ├── distribution: test ├── prune: (2,7) ├── scan tc │ ├── columns: a:1 b:2 │ ├── stats: [rows=1000, distinct(1)=100, null(1)=10] │ ├── cost: 1125.02 + │ ├── distribution: test │ ├── prune: (1,2) │ ├── interesting orderings: (+1) │ └── unfiltered-cols: (1-5) @@ -1410,6 +1414,7 @@ inner-join (hash) │ ├── cost: 1104.82 │ ├── key: (6) │ ├── fd: (6)-->(7) + │ ├── distribution: test │ ├── prune: (6,7) │ ├── interesting orderings: (+6) │ └── unfiltered-cols: (6-9) @@ -1805,32 +1810,56 @@ regions: query T EXPLAIN (OPT, MEMO) SELECT * FROM tc JOIN t ON k=a ---- -memo (optimized, ~11KB, required=[presentation: info:10]) - ├── G1: (explain G2 [presentation: a:1,b:2,k:6,v:7]) - │ └── [presentation: info:10] - │ ├── best: (explain G2="[presentation: a:1,b:2,k:6,v:7]" [presentation: a:1,b:2,k:6,v:7]) +memo (optimized, ~11KB, required=[presentation: info:10] [distribution: test]) + ├── G1: (explain G2 [presentation: a:1,b:2,k:6,v:7] [distribution: test]) + │ ├── [presentation: info:10] [distribution: test] + │ │ ├── best: (explain G2="[presentation: a:1,b:2,k:6,v:7] [distribution: test]" [presentation: a:1,b:2,k:6,v:7] [distribution: test]) + │ │ └── cost: 2269.93 + │ └── [] + │ ├── best: (explain G2="[presentation: a:1,b:2,k:6,v:7]" [presentation: a:1,b:2,k:6,v:7] [distribution: test]) │ └── cost: 2269.93 ├── G2: (inner-join G3 G4 G5) (inner-join G4 G3 G5) (merge-join G3 G4 G6 inner-join,+1,+6) (lookup-join G3 G6 t,keyCols=[1],outCols=(1,2,6,7)) (merge-join G4 G3 G6 inner-join,+6,+1) (lookup-join G7 G6 tc,keyCols=[3],outCols=(1,2,6,7)) - │ └── [presentation: a:1,b:2,k:6,v:7] + │ ├── [presentation: a:1,b:2,k:6,v:7] + │ │ ├── best: (inner-join G3 G4 G5) + │ │ └── cost: 2269.91 + │ ├── [presentation: a:1,b:2,k:6,v:7] [distribution: test] + │ │ ├── best: (inner-join G3="[distribution: test]" G4="[distribution: test]" G5) + │ │ └── cost: 2269.91 + │ └── [] │ ├── best: (inner-join G3 G4 G5) │ └── cost: 2269.91 ├── G3: (scan tc,cols=(1,2)) + │ ├── [distribution: test] + │ │ ├── best: (scan tc,cols=(1,2)) + │ │ └── cost: 1125.02 │ ├── [ordering: +1] │ │ ├── best: (sort G3) │ │ └── cost: 1364.50 + │ ├── [ordering: +1] [distribution: test] + │ │ ├── best: (distribute G3="[ordering: +1]") + │ │ └── cost: 1364.53 │ └── [] │ ├── best: (scan tc,cols=(1,2)) │ └── cost: 1125.02 ├── G4: (scan t,cols=(6,7)) + │ ├── [distribution: test] + │ │ ├── best: (scan t,cols=(6,7)) + │ │ └── cost: 1104.82 │ ├── [ordering: +6] │ │ ├── best: (scan t,cols=(6,7)) │ │ └── cost: 1104.82 + │ ├── [ordering: +6] [distribution: test] + │ │ ├── best: (scan t,cols=(6,7)) + │ │ └── cost: 1104.82 │ └── [] │ ├── best: (scan t,cols=(6,7)) │ └── cost: 1104.82 ├── G5: (filters G8) ├── G6: (filters) ├── G7: (lookup-join G4 G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) + │ ├── [distribution: test] + │ │ ├── best: (lookup-join G4="[distribution: test]" G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) + │ │ └── cost: 23173.94 │ └── [] │ ├── best: (lookup-join G4 G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) │ └── cost: 23173.94 @@ -1864,32 +1893,56 @@ TABLE t ├── tableoid oid [hidden] [system] └── PRIMARY INDEX t_pkey └── k int not null -memo (optimized, ~11KB, required=[presentation: info:10]) - ├── G1: (explain G2 [presentation: a:1,b:2,k:6,v:7]) - │ └── [presentation: info:10] - │ ├── 
best: (explain G2="[presentation: a:1,b:2,k:6,v:7]" [presentation: a:1,b:2,k:6,v:7]) +memo (optimized, ~11KB, required=[presentation: info:10] [distribution: test]) + ├── G1: (explain G2 [presentation: a:1,b:2,k:6,v:7] [distribution: test]) + │ ├── [presentation: info:10] [distribution: test] + │ │ ├── best: (explain G2="[presentation: a:1,b:2,k:6,v:7] [distribution: test]" [presentation: a:1,b:2,k:6,v:7] [distribution: test]) + │ │ └── cost: 2269.93 + │ └── [] + │ ├── best: (explain G2="[presentation: a:1,b:2,k:6,v:7]" [presentation: a:1,b:2,k:6,v:7] [distribution: test]) │ └── cost: 2269.93 ├── G2: (inner-join G3 G4 G5) (inner-join G4 G3 G5) (merge-join G3 G4 G6 inner-join,+1,+6) (lookup-join G3 G6 t,keyCols=[1],outCols=(1,2,6,7)) (merge-join G4 G3 G6 inner-join,+6,+1) (lookup-join G7 G6 tc,keyCols=[3],outCols=(1,2,6,7)) - │ └── [presentation: a:1,b:2,k:6,v:7] + │ ├── [presentation: a:1,b:2,k:6,v:7] + │ │ ├── best: (inner-join G3 G4 G5) + │ │ └── cost: 2269.91 + │ ├── [presentation: a:1,b:2,k:6,v:7] [distribution: test] + │ │ ├── best: (inner-join G3="[distribution: test]" G4="[distribution: test]" G5) + │ │ └── cost: 2269.91 + │ └── [] │ ├── best: (inner-join G3 G4 G5) │ └── cost: 2269.91 ├── G3: (scan tc,cols=(1,2)) + │ ├── [distribution: test] + │ │ ├── best: (scan tc,cols=(1,2)) + │ │ └── cost: 1125.02 │ ├── [ordering: +1] │ │ ├── best: (sort G3) │ │ └── cost: 1364.50 + │ ├── [ordering: +1] [distribution: test] + │ │ ├── best: (distribute G3="[ordering: +1]") + │ │ └── cost: 1364.53 │ └── [] │ ├── best: (scan tc,cols=(1,2)) │ └── cost: 1125.02 ├── G4: (scan t,cols=(6,7)) + │ ├── [distribution: test] + │ │ ├── best: (scan t,cols=(6,7)) + │ │ └── cost: 1104.82 │ ├── [ordering: +6] │ │ ├── best: (scan t,cols=(6,7)) │ │ └── cost: 1104.82 + │ ├── [ordering: +6] [distribution: test] + │ │ ├── best: (scan t,cols=(6,7)) + │ │ └── cost: 1104.82 │ └── [] │ ├── best: (scan t,cols=(6,7)) │ └── cost: 1104.82 ├── G5: (filters G8) ├── G6: (filters) ├── G7: (lookup-join G4 G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) + │ ├── [distribution: test] + │ │ ├── best: (lookup-join G4="[distribution: test]" G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) + │ │ └── cost: 23173.94 │ └── [] │ ├── best: (lookup-join G4 G6 tc@c,keyCols=[6],outCols=(1,3,6,7)) │ └── cost: 23173.94 @@ -1902,11 +1955,13 @@ inner-join (hash) ├── stats: [rows=990, distinct(1)=99, null(1)=0, distinct(6)=99, null(6)=0] ├── cost: 2269.90625 ├── fd: (6)-->(7), (1)==(6), (6)==(1) + ├── distribution: test ├── prune: (2,7) ├── scan tc │ ├── columns: a:1 b:2 │ ├── stats: [rows=1000, distinct(1)=100, null(1)=10] │ ├── cost: 1125.02 + │ ├── distribution: test │ ├── prune: (1,2) │ ├── interesting orderings: (+1) │ └── unfiltered-cols: (1-5) @@ -1916,6 +1971,7 @@ inner-join (hash) │ ├── cost: 1104.82 │ ├── key: (6) │ ├── fd: (6)-->(7) + │ ├── distribution: test │ ├── prune: (6,7) │ ├── interesting orderings: (+6) │ └── unfiltered-cols: (6-9) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/inverted_index b/pkg/sql/opt/exec/execbuilder/testdata/inverted_index index 792dbb025dc1..a10c6ae25d5a 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/inverted_index +++ b/pkg/sql/opt/exec/execbuilder/testdata/inverted_index @@ -1471,6 +1471,7 @@ inner-join (lookup geo_table) ├── cost: 112704.87 ├── key: (1,5) ├── fd: (1)-->(2), (5)-->(6) + ├── distribution: test ├── prune: (1,5) ├── inner-join (inverted geo_table@geom_index) │ ├── columns: geo_table2.k:1 geo_table2.geom:2 geo_table.k:11 @@ -1480,12 +1481,14 @@ inner-join (lookup geo_table) │ ├── cost: 41804.84 │ ├── 
key: (1,11) │ ├── fd: (1)-->(2) + │ ├── distribution: test │ ├── scan geo_table2 │ │ ├── columns: geo_table2.k:1 geo_table2.geom:2 │ │ ├── stats: [rows=1000, distinct(1)=1000, null(1)=0, distinct(2)=100, null(2)=10] │ │ ├── cost: 1104.82 │ │ ├── key: (1) │ │ ├── fd: (1)-->(2) + │ │ ├── distribution: test │ │ ├── prune: (1,2) │ │ └── unfiltered-cols: (1-4) │ └── filters (true) @@ -1561,6 +1564,7 @@ left-join (lookup geo_table) ├── cost: 112904.87 ├── key: (1,5) ├── fd: (1)-->(2), (5)-->(6) + ├── distribution: test ├── prune: (1,5) ├── left-join (inverted geo_table@geom_index) │ ├── columns: geo_table2.k:1 geo_table2.geom:2 geo_table.k:11 continuation:16 @@ -1571,12 +1575,14 @@ left-join (lookup geo_table) │ ├── cost: 42004.84 │ ├── key: (1,11) │ ├── fd: (1)-->(2), (11)-->(16) + │ ├── distribution: test │ ├── scan geo_table2 │ │ ├── columns: geo_table2.k:1 geo_table2.geom:2 │ │ ├── stats: [rows=1000, distinct(1)=1000, null(1)=0] │ │ ├── cost: 1104.82 │ │ ├── key: (1) │ │ ├── fd: (1)-->(2) + │ │ ├── distribution: test │ │ ├── prune: (1,2) │ │ └── unfiltered-cols: (1-4) │ └── filters (true) @@ -1597,6 +1603,7 @@ semi-join (lookup geo_table) ├── cost: 112704.87 ├── key: (1) ├── fd: (1)-->(2) + ├── distribution: test ├── prune: (1) ├── inner-join (inverted geo_table@geom_index) │ ├── columns: geo_table2.k:1 geo_table2.geom:2 geo_table.k:11 continuation:16 @@ -1607,12 +1614,14 @@ semi-join (lookup geo_table) │ ├── cost: 42004.84 │ ├── key: (1,11) │ ├── fd: (1)-->(2), (11)-->(16) + │ ├── distribution: test │ ├── scan geo_table2 │ │ ├── columns: geo_table2.k:1 geo_table2.geom:2 │ │ ├── stats: [rows=1000, distinct(1)=1000, null(1)=0, distinct(2)=100, null(2)=10] │ │ ├── cost: 1104.82 │ │ ├── key: (1) │ │ ├── fd: (1)-->(2) + │ │ ├── distribution: test │ │ ├── prune: (1,2) │ │ ├── interesting orderings: (+1) │ │ └── unfiltered-cols: (1-4) @@ -1634,6 +1643,7 @@ anti-join (lookup geo_table) ├── cost: 112704.87 ├── key: (1) ├── fd: (1)-->(2) + ├── distribution: test ├── prune: (1) ├── left-join (inverted geo_table@geom_index) │ ├── columns: geo_table2.k:1 geo_table2.geom:2 geo_table.k:11 continuation:16 @@ -1644,12 +1654,14 @@ anti-join (lookup geo_table) │ ├── cost: 42004.84 │ ├── key: (1,11) │ ├── fd: (1)-->(2), (11)-->(16) + │ ├── distribution: test │ ├── scan geo_table2 │ │ ├── columns: geo_table2.k:1 geo_table2.geom:2 │ │ ├── stats: [rows=1000, distinct(1)=1000, null(1)=0] │ │ ├── cost: 1104.82 │ │ ├── key: (1) │ │ ├── fd: (1)-->(2) + │ │ ├── distribution: test │ │ ├── prune: (1,2) │ │ └── unfiltered-cols: (1-4) │ └── filters (true) diff --git a/pkg/sql/opt/exec/execbuilder/testdata/stats b/pkg/sql/opt/exec/execbuilder/testdata/stats index c589e2bdaff2..093c9a2dd37a 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/stats +++ b/pkg/sql/opt/exec/execbuilder/testdata/stats @@ -221,11 +221,13 @@ distinct-on ├── stats: [rows=20.0617284, distinct(1,2)=20.0617284, null(1,2)=0] ├── cost: 51.917284 ├── key: (1,2) + ├── distribution: test └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=33.3333333, distinct(1)=6.66666667, null(1)=0, distinct(1,2)=20.0617284, null(1,2)=0] ├── cost: 50.6866667 + ├── distribution: test ├── prune: (2) └── interesting orderings: (+1) (+2) @@ -241,11 +243,13 @@ distinct-on ├── stats: [rows=33.3333333, distinct(1,2)=33.3333333, null(1,2)=0] ├── cost: 52.05 ├── key: (1,2) + ├── distribution: test └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] ├── stats: [rows=33.3333333, distinct(1)=6.66666667, null(1)=0, 
distinct(1,2)=33.3333333, null(1,2)=0] ├── cost: 50.6866667 + ├── distribution: test ├── prune: (2) └── interesting orderings: (+1) (+2) @@ -261,6 +265,7 @@ distinct-on ├── stats: [rows=100, distinct(1,2)=100, null(1,2)=0] ├── cost: 128.050563 ├── key: (1,2) + ├── distribution: test └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] @@ -268,6 +273,7 @@ distinct-on │ histogram(1)= 0 50 0 20 8 5 12 5 │ <--- 1 --- 2 --- 10 ---- 20 ├── cost: 124.02 + ├── distribution: test ├── prune: (2) └── interesting orderings: (+1) (+2) @@ -283,6 +289,7 @@ distinct-on ├── stats: [rows=25, distinct(1,2)=25, null(1,2)=0] ├── cost: 127.3 ├── key: (1,2) + ├── distribution: test └── scan uv@uv_u_idx ├── columns: u:1 v:2 ├── constraint: /1/3: (/NULL - /29] @@ -290,6 +297,7 @@ distinct-on │ histogram(1)= 0 50 0 20 8 5 12 5 │ <--- 1 --- 2 --- 10 ---- 20 ├── cost: 124.02 + ├── distribution: test ├── prune: (2) └── interesting orderings: (+1) (+2) @@ -312,6 +320,7 @@ limit ├── cost: 121.069999 ├── key: () ├── fd: ()-->(1) + ├── distribution: test ├── select │ ├── columns: j:1 │ ├── immutable @@ -319,11 +328,13 @@ limit │ ├── cost: 121.049999 │ ├── fd: ()-->(1) │ ├── limit hint: 1.00 + │ ├── distribution: test │ ├── scan tj │ │ ├── columns: j:1 │ │ ├── stats: [rows=1000, distinct(1)=100, null(1)=10] │ │ ├── cost: 120.019999 │ │ ├── limit hint: 100.00 + │ │ ├── distribution: test │ │ └── prune: (1) │ └── filters │ └── j:1 IS NULL [outer=(1), immutable, constraints=(/1: [/NULL - /NULL]; tight), fd=()-->(1)] @@ -343,6 +354,7 @@ limit ├── cost: 19.42 ├── key: () ├── fd: ()-->(1) + ├── distribution: test ├── select │ ├── columns: j:1 │ ├── immutable @@ -350,11 +362,13 @@ limit │ ├── cost: 19.4 │ ├── fd: ()-->(1) │ ├── limit hint: 1.00 + │ ├── distribution: test │ ├── scan tj │ │ ├── columns: j:1 │ │ ├── stats: [rows=5, distinct(1)=4, null(1)=1] │ │ ├── cost: 19.32 │ │ ├── limit hint: 5.00 + │ │ ├── distribution: test │ │ └── prune: (1) │ └── filters │ └── j:1 IS NULL [outer=(1), immutable, constraints=(/1: [/NULL - /NULL]; tight), fd=()-->(1)] diff --git a/pkg/sql/opt/invertedexpr/geo_expression.go b/pkg/sql/opt/invertedexpr/geo_expression.go index 5743f4321e0d..70cc7bcbe0b6 100644 --- a/pkg/sql/opt/invertedexpr/geo_expression.go +++ b/pkg/sql/opt/invertedexpr/geo_expression.go @@ -23,9 +23,9 @@ import ( // This file contains functions to encode geoindex.{UnionKeySpans, RPKeyExpr} // into a SpanExpression. These functions are in this package since they -// need to use sqlbase.EncodeTableKey to convert geoindex.Key to -// invertedexpr.EncVal and that cannot be done in the geoindex package -// as it introduces a circular dependency. +// need to use keyside.Encode to convert geoindex.Key to invertedexpr.EncVal and +// that cannot be done in the geoindex package as it introduces a circular +// dependency. // // TODO(sumeer): change geoindex to produce SpanExpressions directly. 
diff --git a/pkg/sql/opt/invertedidx/geo.go b/pkg/sql/opt/invertedidx/geo.go index aa07576e3f2e..4518fbffd96d 100644 --- a/pkg/sql/opt/invertedidx/geo.go +++ b/pkg/sql/opt/invertedidx/geo.go @@ -716,7 +716,7 @@ type geoDatumsToInvertedExpr struct { filterer *PreFilterer row rowenc.EncDatumRow - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc } var _ invertedexpr.DatumsToInvertedExpr = &geoDatumsToInvertedExpr{} diff --git a/pkg/sql/opt/invertedidx/json_array.go b/pkg/sql/opt/invertedidx/json_array.go index 11258ca4404b..98cdb682932c 100644 --- a/pkg/sql/opt/invertedidx/json_array.go +++ b/pkg/sql/opt/invertedidx/json_array.go @@ -168,7 +168,7 @@ type jsonOrArrayDatumsToInvertedExpr struct { invertedExpr tree.TypedExpr row rowenc.EncDatumRow - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc } var _ invertedexpr.DatumsToInvertedExpr = &jsonOrArrayDatumsToInvertedExpr{} diff --git a/pkg/sql/opt/memo/BUILD.bazel b/pkg/sql/opt/memo/BUILD.bazel index a3011a8db889..cf06e8ce3ca6 100644 --- a/pkg/sql/opt/memo/BUILD.bazel +++ b/pkg/sql/opt/memo/BUILD.bazel @@ -25,7 +25,6 @@ go_library( deps = [ "//pkg/geo/geoindex", "//pkg/sql/catalog/colinfo", - "//pkg/sql/catalog/descpb", "//pkg/sql/inverted", "//pkg/sql/opt", "//pkg/sql/opt/cat", @@ -33,7 +32,8 @@ go_library( "//pkg/sql/opt/invertedexpr", # keep "//pkg/sql/opt/props", "//pkg/sql/opt/props/physical", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/builtins", "//pkg/sql/sem/tree", "//pkg/sql/types", diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go index 73ae2f688f87..607954338846 100644 --- a/pkg/sql/opt/memo/expr_format.go +++ b/pkg/sql/opt/memo/expr_format.go @@ -817,6 +817,15 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { if required.LimitHint != 0 { tp.Childf("limit hint: %.2f", required.LimitHint) } + + // Show the required distribution, if any, and also show the provided input + // distribution if this is a Distribute expression. + if !required.Distribution.Any() { + tp.Childf("distribution: %s", required.Distribution.String()) + } + if distribute, ok := e.(*DistributeExpr); ok { + tp.Childf("input distribution: %s", distribute.Input.ProvidedPhysical().Distribution.String()) + } } if !f.HasFlags(ExprFmtHideRuleProps) { diff --git a/pkg/sql/opt/memo/group.go b/pkg/sql/opt/memo/group.go index abeeaca7c26d..b0988fed9264 100644 --- a/pkg/sql/opt/memo/group.go +++ b/pkg/sql/opt/memo/group.go @@ -52,11 +52,7 @@ type bestProps struct { required *physical.Required // Provided properties, which must be compatible with the required properties. - // - // We store these properties in-place because the structure is very small; if - // that changes we will want to intern them, similar to the required - // properties. - provided physical.Provided + provided *physical.Provided // Cost of the best expression. 
cost Cost diff --git a/pkg/sql/opt/memo/interner.go b/pkg/sql/opt/memo/interner.go index 93323371c2a3..a3e3560c013e 100644 --- a/pkg/sql/opt/memo/interner.go +++ b/pkg/sql/opt/memo/interner.go @@ -18,13 +18,13 @@ import ( "unsafe" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -630,6 +630,9 @@ func (h *hasher) HashPhysProps(val *physical.Required) { } h.HashOrderingChoice(val.Ordering) h.HashFloat64(val.LimitHint) + for _, region := range val.Distribution.Regions { + h.HashString(region) + } } func (h *hasher) HashLockingItem(val *tree.LockingItem) { @@ -1206,13 +1209,13 @@ func encodeDatum(b []byte, val tree.Datum) []byte { // should not be considered equivalent by the interner (e.g. decimal values // 1.0 and 1.00). if !colinfo.CanHaveCompositeKeyEncoding(val.ResolvedType()) { - b, err = rowenc.EncodeTableKey(b, val, encoding.Ascending) + b, err = keyside.Encode(b, val, encoding.Ascending) if err == nil { return b } } - b, err = rowenc.EncodeTableValue(b, descpb.ColumnID(encoding.NoColumnID), val, nil /* scratch */) + b, err = valueside.Encode(b, valueside.NoColumnID, val, nil /* scratch */) if err != nil { panic(err) } diff --git a/pkg/sql/opt/memo/interner_test.go b/pkg/sql/opt/memo/interner_test.go index df2584b54a01..d1ce84f5b2b5 100644 --- a/pkg/sql/opt/memo/interner_test.go +++ b/pkg/sql/opt/memo/interner_test.go @@ -588,6 +588,23 @@ func TestInternerPhysProps(t *testing.T) { Presentation: physical.Presentation{{Alias: "d", ID: 2}, {Alias: "e", ID: 3}}, Ordering: props.ParseOrderingChoice("+(1|2),+3 opt(4,5,6)"), } + physProps7 := physical.Required{ + Presentation: physical.Presentation{{Alias: "c", ID: 1}}, + Ordering: props.ParseOrderingChoice("+(1|2),+3 opt(4,5)"), + LimitHint: 1, + } + physProps8 := physical.Required{ + Presentation: physical.Presentation{{Alias: "c", ID: 1}}, + Ordering: props.ParseOrderingChoice("+(1|2),+3 opt(4,5)"), + LimitHint: 1, + Distribution: physical.Distribution{Regions: []string{"us-east", "us-west"}}, + } + physProps9 := physical.Required{ + Presentation: physical.Presentation{{Alias: "c", ID: 1}}, + Ordering: props.ParseOrderingChoice("+(1|2),+3 opt(4,5)"), + LimitHint: 1, + Distribution: physical.Distribution{Regions: []string{"us-east", "us-west"}}, + } testCases := []struct { phys *physical.Required @@ -600,6 +617,9 @@ func TestInternerPhysProps(t *testing.T) { {phys: &physProps4, inCache: false}, {phys: &physProps5, inCache: false}, {phys: &physProps6, inCache: false}, + {phys: &physProps7, inCache: false}, + {phys: &physProps8, inCache: false}, + {phys: &physProps9, inCache: true}, } inCache := make(map[*physical.Required]bool) diff --git a/pkg/sql/opt/memo/memo.go b/pkg/sql/opt/memo/memo.go index 3459578a8630..7cc35b497844 100644 --- a/pkg/sql/opt/memo/memo.go +++ b/pkg/sql/opt/memo/memo.go @@ -364,7 +364,7 @@ func (m *Memo) SetBestProps( } bp := e.bestProps() bp.required = required - 
bp.provided = *provided + bp.provided = provided bp.cost = cost } diff --git a/pkg/sql/opt/memo/testdata/memo b/pkg/sql/opt/memo/testdata/memo index c0bd8e294f1e..1e2758189c7d 100644 --- a/pkg/sql/opt/memo/testdata/memo +++ b/pkg/sql/opt/memo/testdata/memo @@ -357,7 +357,7 @@ memo (optimized, ~4KB, required=[presentation: array_agg:5]) memo SELECT DISTINCT info FROM [EXPLAIN SELECT 123 AS k] ---- -memo (optimized, ~9KB, required=[presentation: info:3]) +memo (optimized, ~8KB, required=[presentation: info:3]) ├── G1: (distinct-on G2 G3 cols=(3)) │ └── [presentation: info:3] │ ├── best: (distinct-on G2 G3 cols=(3)) diff --git a/pkg/sql/opt/norm/BUILD.bazel b/pkg/sql/opt/norm/BUILD.bazel index 6f87565590ae..02249b60748a 100644 --- a/pkg/sql/opt/norm/BUILD.bazel +++ b/pkg/sql/opt/norm/BUILD.bazel @@ -40,7 +40,7 @@ go_library( "//pkg/sql/opt/props/physical", "//pkg/sql/parser", "//pkg/sql/privilege", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/builtins", "//pkg/sql/sem/tree", "//pkg/sql/types", @@ -51,7 +51,7 @@ go_library( "//pkg/util/errorutil", "//pkg/util/json", "//pkg/util/log", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/opt/norm/general_funcs.go b/pkg/sql/opt/norm/general_funcs.go index 3cc8942d1890..8bc9611fb469 100644 --- a/pkg/sql/opt/norm/general_funcs.go +++ b/pkg/sql/opt/norm/general_funcs.go @@ -11,7 +11,7 @@ package norm import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" diff --git a/pkg/sql/opt/norm/groupby_funcs.go b/pkg/sql/opt/norm/groupby_funcs.go index ce8b632d03a1..07bc37351ecc 100644 --- a/pkg/sql/opt/norm/groupby_funcs.go +++ b/pkg/sql/opt/norm/groupby_funcs.go @@ -14,7 +14,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/props" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -255,7 +255,7 @@ func (c *CustomFuncs) areRowsDistinct( // Encode the datum using the key encoding format. The encodings for // multiple column datums are simply appended to one another. var err error - encoded, err = rowenc.EncodeTableKey(encoded, datum, encoding.Ascending) + encoded, err = keyside.Encode(encoded, datum, encoding.Ascending) if err != nil { // Assume rows are not distinct if an encoding error occurs. return false diff --git a/pkg/sql/opt/norm/testdata/rules/fold_constants b/pkg/sql/opt/norm/testdata/rules/fold_constants index 43b029aa289f..798461e150d6 100644 --- a/pkg/sql/opt/norm/testdata/rules/fold_constants +++ b/pkg/sql/opt/norm/testdata/rules/fold_constants @@ -1075,6 +1075,7 @@ values ├── cardinality: [1 - 1] ├── key: () ├── fd: ()-->(1) + ├── distribution: east └── ('east1-b',) norm expect=FoldFunction diff --git a/pkg/sql/opt/ops/enforcer.opt b/pkg/sql/opt/ops/enforcer.opt index 6c3cbe07b62f..93eb8101033e 100644 --- a/pkg/sql/opt/ops/enforcer.opt +++ b/pkg/sql/opt/ops/enforcer.opt @@ -25,3 +25,15 @@ define Sort { # specified prefix of columns. 
InputOrdering OrderingChoice } + +# Distribute enforces the physical distribution of rows returned by its input +# expression. Currently, it is only used to re-distribute data across different +# sets of regions in a multi-region cluster. For example, if rows are spread +# across multiple regions, a Distribute enforcer can be used to route the rows +# to the gateway region. See the Distribution field in the PhysicalProps struct. +# TODO(rytaft): We should probably include the input distribution here so we can +# accurately cost the Distribute operator. This will likely require calculating +# "interesting distributions", similar to "interesting orderings". +[Enforcer, Telemetry] +define Distribute { +} diff --git a/pkg/sql/opt/optbuilder/fk_cascade.go b/pkg/sql/opt/optbuilder/fk_cascade.go index f7ca08262bf9..22df18aaea5c 100644 --- a/pkg/sql/opt/optbuilder/fk_cascade.go +++ b/pkg/sql/opt/optbuilder/fk_cascade.go @@ -458,7 +458,7 @@ func (cb *onDeleteSetBuilder) Build( updateExprs[i].Expr = tree.DefaultVal{} } } - mb.addUpdateCols(updateExprs, false /* isUpsert */) + mb.addUpdateCols(updateExprs) // TODO(radu): consider plumbing a flag to prevent building the FK check // against the parent we are cascading from. Need to investigate in which @@ -687,7 +687,7 @@ func (cb *onUpdateCascadeBuilder) Build( panic(errors.AssertionFailedf("unsupported action")) } } - mb.addUpdateCols(updateExprs, false /* isUpsert */) + mb.addUpdateCols(updateExprs) mb.buildUpdate(nil /* returning */) return mb.outScope.expr diff --git a/pkg/sql/opt/optbuilder/insert.go b/pkg/sql/opt/optbuilder/insert.go index cf76ab00f77a..e9d0d5221171 100644 --- a/pkg/sql/opt/optbuilder/insert.go +++ b/pkg/sql/opt/optbuilder/insert.go @@ -14,7 +14,6 @@ import ( "fmt" "sort" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" @@ -259,7 +258,6 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope // // INSERT INTO DEFAULT VALUES // - isUpsert := ins.OnConflict != nil && !ins.OnConflict.DoNothing if !ins.DefaultValues() { // Replace any DEFAULT expressions in the VALUES clause, if a VALUES clause // exists: @@ -268,15 +266,15 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope // rows := mb.replaceDefaultExprs(ins.Rows) - mb.buildInputForInsert(inScope, rows, isUpsert) + mb.buildInputForInsert(inScope, rows) } else { - mb.buildInputForInsert(inScope, nil /* rows */, isUpsert) + mb.buildInputForInsert(inScope, nil /* rows */) } // Add default columns that were not explicitly specified by name or // implicitly targeted by input columns. Also add any computed columns. In // both cases, include columns undergoing mutations in the write-only state. - mb.addSynthesizedColsForInsert(isUpsert) + mb.addSynthesizedColsForInsert() var returning tree.ReturningExprs if resultsNeeded(ins.Returning) { @@ -294,8 +292,7 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope // Wrap the input in one ANTI JOIN per UNIQUE index, and filter out rows // that have conflicts. See the buildInputForDoNothing comment for more // details. 
- conflictOrds := mb.mapPublicColumnNamesToOrdinals(ins.OnConflict.Columns) - mb.buildInputForDoNothing(inScope, conflictOrds, ins.OnConflict.ArbiterPredicate) + mb.buildInputForDoNothing(inScope, ins.OnConflict) // Since buildInputForDoNothing filters out rows with conflicts, always // insert rows that are not filtered. @@ -312,12 +309,11 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope if mb.needExistingRows() { // Left-join each input row to the target table, using conflict columns // derived from the primary index as the join condition. - primaryOrds := getExplicitPrimaryKeyOrdinals(mb.tab) - mb.buildInputForUpsert(inScope, primaryOrds, nil /* arbiterPredicate */, nil /* whereClause */) + mb.buildInputForUpsert(inScope, nil /* onConflict */, nil /* whereClause */) // Add additional columns for computed expressions that may depend on any // updated columns, as well as mutation columns with default values. - mb.addSynthesizedColsForUpdate(true /* isUpsert */) + mb.addSynthesizedColsForUpdate() } // Build the final upsert statement, including any returned expressions. @@ -327,14 +323,13 @@ func (b *Builder) buildInsert(ins *tree.Insert, inScope *scope) (outScope *scope default: // Left-join each input row to the target table, using the conflict columns // as the join condition. - conflictOrds := mb.mapPublicColumnNamesToOrdinals(ins.OnConflict.Columns) - mb.buildInputForUpsert(inScope, conflictOrds, ins.OnConflict.ArbiterPredicate, ins.OnConflict.Where) + mb.buildInputForUpsert(inScope, ins.OnConflict, ins.OnConflict.Where) // Derive the columns that will be updated from the SET expressions. mb.addTargetColsForUpdate(ins.OnConflict.Exprs) // Build each of the SET expressions. - mb.addUpdateCols(ins.OnConflict.Exprs, true /* isUpsert */) + mb.addUpdateCols(ins.OnConflict.Exprs) // Build the final upsert statement, including any returned expressions. mb.buildUpsert(returning) @@ -558,9 +553,7 @@ func (mb *mutationBuilder) addTargetTableColsForInsert(maxCols int) { // buildInputForInsert constructs the memo group for the input expression and // constructs a new output scope containing that expression's output columns. -func (mb *mutationBuilder) buildInputForInsert( - inScope *scope, inputRows *tree.Select, isUpsert bool, -) { +func (mb *mutationBuilder) buildInputForInsert(inScope *scope, inputRows *tree.Select) { // Handle DEFAULT VALUES case by creating a single empty row as input. if inputRows == nil { mb.outScope = inScope.push() @@ -619,11 +612,6 @@ func (mb *mutationBuilder) buildInputForInsert( inCol := &mb.outScope.cols[i] ord := mb.tabID.ColumnOrdinal(mb.targetColList[i]) - if isUpsert { - // Type check the input column against the corresponding table column. - checkDatumTypeFitsColumnType(mb.tab.Column(ord), inCol.typ) - } - // Raise an error if the target column is a `GENERATED ALWAYS AS // IDENTITY` column. Such a column is not allowed to be explicitly // written to. @@ -645,10 +633,8 @@ func (mb *mutationBuilder) buildInputForInsert( mb.insertColIDs[ord] = inCol.id } - if !isUpsert { - // Add assignment casts for insert columns. - mb.addAssignmentCasts(mb.insertColIDs) - } + // Add assignment casts for insert columns. + mb.addAssignmentCasts(mb.insertColIDs) } // addSynthesizedColsForInsert wraps an Insert input expression with a Project @@ -656,7 +642,7 @@ func (mb *mutationBuilder) buildInputForInsert( // columns that are not yet part of the target column list. 
This includes all // write-only mutation columns, since they must always have default or computed // values. -func (mb *mutationBuilder) addSynthesizedColsForInsert(isUpsert bool) { +func (mb *mutationBuilder) addSynthesizedColsForInsert() { // Start by adding non-computed columns that have not already been explicitly // specified in the query. Do this before adding computed columns, since those // may depend on non-computed columns. @@ -666,25 +652,14 @@ func (mb *mutationBuilder) addSynthesizedColsForInsert(isUpsert bool) { false, /* applyOnUpdate */ ) - if isUpsert { - // Possibly round DECIMAL-related columns containing insertion values (whether - // synthesized or not). - mb.roundDecimalValues(mb.insertColIDs, false /* roundComputedCols */) - } else { - // Add assignment casts for default column values. - mb.addAssignmentCasts(mb.insertColIDs) - } + // Add assignment casts for default column values. + mb.addAssignmentCasts(mb.insertColIDs) // Now add all computed columns. mb.addSynthesizedComputedCols(mb.insertColIDs, false /* restrict */) - // Possibly round DECIMAL-related computed columns. - if isUpsert { - mb.roundDecimalValues(mb.insertColIDs, true /* roundComputedCols */) - } else { - // Add assignment casts for computed column values. - mb.addAssignmentCasts(mb.insertColIDs) - } + // Add assignment casts for computed column values. + mb.addAssignmentCasts(mb.insertColIDs) } // buildInsert constructs an Insert operator, possibly wrapped by a Project @@ -715,13 +690,10 @@ func (mb *mutationBuilder) buildInsert(returning tree.ReturningExprs) { // buildInputForDoNothing wraps the input expression in ANTI JOIN expressions, // one for each arbiter on the target table. See the comment header for // Builder.buildInsert for an example. -func (mb *mutationBuilder) buildInputForDoNothing( - inScope *scope, conflictOrds util.FastIntSet, arbiterPredicate tree.Expr, -) { +func (mb *mutationBuilder) buildInputForDoNothing(inScope *scope, onConflict *tree.OnConflict) { // Determine the set of arbiter indexes and constraints to use to check for // conflicts. - mb.arbiters = mb.findArbiters(conflictOrds, arbiterPredicate) - + mb.arbiters = mb.findArbiters(onConflict) insertColScope := mb.outScope.replace() insertColScope.appendColumnsFromScope(mb.outScope) @@ -763,12 +735,11 @@ func (mb *mutationBuilder) buildInputForDoNothing( // given insert row conflicts with an existing row in the table. If it is null, // then there is no conflict. func (mb *mutationBuilder) buildInputForUpsert( - inScope *scope, conflictOrds util.FastIntSet, arbiterPredicate tree.Expr, whereClause *tree.Where, + inScope *scope, onConflict *tree.OnConflict, whereClause *tree.Where, ) { // Determine the set of arbiter indexes and constraints to use to check for // conflicts. - mb.arbiters = mb.findArbiters(conflictOrds, arbiterPredicate) - + mb.arbiters = mb.findArbiters(onConflict) // TODO(mgartner): Add support for multiple arbiter indexes or constraints, // similar to buildInputForDoNothing. if mb.arbiters.Len() > 1 { @@ -1021,26 +992,3 @@ func (mb *mutationBuilder) projectUpsertColumns() { mb.b.constructProjectForScope(mb.outScope, projectionsScope) mb.outScope = projectionsScope } - -// mapPublicColumnNamesToOrdinals returns the set of ordinal positions within -// the target table that correspond to the given names. Mutation and system -// columns are ignored. 
-func (mb *mutationBuilder) mapPublicColumnNamesToOrdinals(names tree.NameList) util.FastIntSet { - var ords util.FastIntSet - for _, name := range names { - found := false - for i, n := 0, mb.tab.ColumnCount(); i < n; i++ { - tabCol := mb.tab.Column(i) - if tabCol.ColName() == name && !tabCol.IsMutation() && tabCol.Kind() != cat.System { - ords.Add(i) - found = true - break - } - } - - if !found { - panic(colinfo.NewUndefinedColumnError(string(name))) - } - } - return ords -} diff --git a/pkg/sql/opt/optbuilder/mutation_builder.go b/pkg/sql/opt/optbuilder/mutation_builder.go index caa2064fd904..a30ee7c96e24 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder.go +++ b/pkg/sql/opt/optbuilder/mutation_builder.go @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" @@ -137,10 +136,6 @@ type mutationBuilder struct { // detect conflicts for UPSERT and INSERT ON CONFLICT statements. arbiters arbiterSet - // roundedDecimalCols is the set of columns that have already been rounded. - // Keeping this set avoids rounding the same column multiple times. - roundedDecimalCols opt.ColSet - // subqueries temporarily stores subqueries that were built during initial // analysis of SET expressions. They will be used later when the subqueries // are joined into larger LEFT OUTER JOIN expressions. @@ -721,148 +716,6 @@ func (mb *mutationBuilder) addSynthesizedComputedCols(colIDs opt.OptionalColList mb.outScope = pb.Finish() } -// roundDecimalValues wraps each DECIMAL-related column (including arrays of -// decimals) with a call to the crdb_internal.round_decimal_values function, if -// column values may need to be rounded. This is necessary when mutating table -// columns that have a limited scale (e.g. DECIMAL(10, 1)). Here is the PG docs -// description: -// -// http://www.postgresql.org/docs/9.5/static/datatype-numeric.html -// "If the scale of a value to be stored is greater than -// the declared scale of the column, the system will round the -// value to the specified number of fractional digits. Then, -// if the number of digits to the left of the decimal point -// exceeds the declared precision minus the declared scale, an -// error is raised." -// -// Note that this function only handles the rounding portion of that. The -// precision check is done by the execution engine. The rounding cannot be done -// there, since it needs to happen before check constraints are computed, and -// before UPSERT joins. -// -// If roundComputedCols is false, then don't wrap computed columns. If true, -// then only wrap computed columns. This is necessary because computed columns -// can depend on other columns mutated by the operation; it is necessary to -// first round those values, then evaluated the computed expression, and then -// round the result of the computation. -// -// roundDecimalValues will only round decimal columns that are part of the -// colIDs list (i.e. are not 0). If a column is rounded, then the list will be -// updated with the column ID of the new synthesized column. 
-func (mb *mutationBuilder) roundDecimalValues(colIDs opt.OptionalColList, roundComputedCols bool) { - var projectionsScope *scope - - for i, id := range colIDs { - if id == 0 { - // Column not mutated, so nothing to do. - continue - } - - // Include or exclude computed columns, depending on the value of - // roundComputedCols. - col := mb.tab.Column(i) - if col.IsComputed() != roundComputedCols { - continue - } - - // Check whether the target column's type may require rounding of the - // input value. - colType := col.DatumType() - precision, width := colType.Precision(), colType.Width() - if colType.Family() == types.ArrayFamily { - innerType := colType.ArrayContents() - if innerType.Family() == types.ArrayFamily { - panic(errors.AssertionFailedf("column type should never be a nested array")) - } - precision, width = innerType.Precision(), innerType.Width() - } - - props, overload := findRoundingFunction(colType, precision) - if props == nil { - continue - } - - // If column has already been rounded, then skip it. - if mb.roundedDecimalCols.Contains(id) { - continue - } - - private := &memo.FunctionPrivate{ - Name: "crdb_internal.round_decimal_values", - Typ: col.DatumType(), - Properties: props, - Overload: overload, - } - variable := mb.b.factory.ConstructVariable(id) - scale := mb.b.factory.ConstructConstVal(tree.NewDInt(tree.DInt(width)), types.Int) - fn := mb.b.factory.ConstructFunction(memo.ScalarListExpr{variable, scale}, private) - - // Lazily create new scope and update the scope column to be rounded. - if projectionsScope == nil { - projectionsScope = mb.outScope.replace() - projectionsScope.appendColumnsFromScope(mb.outScope) - } - scopeCol := projectionsScope.getColumn(id) - mb.b.populateSynthesizedColumn(scopeCol, fn) - - // Overwrite the input column ID with the new synthesized column ID. - colIDs[i] = scopeCol.id - mb.roundedDecimalCols.Add(scopeCol.id) - - // When building an UPDATE..FROM expression the projectionScope may have - // two columns with different names but the same ID. As a result, the - // scope column with the correct name (the name of the target column) - // may not be returned from projectionScope.getColumn. We set the name - // of the new scope column to the target column name to ensure it is - // in-scope when building CHECK constraint and partial index PUT - // expressions. See #61520. - // TODO(mgartner): Find a less brittle way to manage the scopes of - // mutations so that this isn't necessary. Ideally the scope produced by - // addUpdateColumns would not include columns in the FROM clause. Those - // columns are only in-scope in the RETURNING clause via - // mb.extraAccessibleCols. - scopeCol.name = scopeColName(mb.tab.Column(i).ColName()) - } - - if projectionsScope != nil { - mb.b.constructProjectForScope(mb.outScope, projectionsScope) - mb.outScope = projectionsScope - } -} - -// findRoundingFunction returns the builtin function overload needed to round -// input values. This is only necessary for DECIMAL or DECIMAL[] types that have -// limited precision, such as: -// -// DECIMAL(15, 1) -// DECIMAL(10, 3)[] -// -// If an input decimal value has more than the required number of fractional -// digits, it must be rounded before being inserted into these types. -// -// NOTE: CRDB does not allow nested array storage types, so only one level of -// array nesting needs to be checked. 
-func findRoundingFunction( - typ *types.T, precision int32, -) (*tree.FunctionProperties, *tree.Overload) { - if precision == 0 { - // Unlimited precision decimal target type never needs rounding. - return nil, nil - } - - props, overloads := builtins.GetBuiltinProperties("crdb_internal.round_decimal_values") - - if typ.Equivalent(types.Decimal) { - return props, &overloads[0] - } - if typ.Equivalent(types.DecimalArray) { - return props, &overloads[1] - } - - // Not DECIMAL or DECIMAL[]. - return nil, nil -} - // addCheckConstraintCols synthesizes a boolean output column for each check // constraint defined on the target table. The mutation operator will report a // constraint violation error if the value of the column is false. @@ -1369,27 +1222,6 @@ func resultsNeeded(r tree.ReturningClause) bool { } } -// checkDatumTypeFitsColumnType verifies that a given scalar value type is valid -// to be stored in a column of the given column type. -// -// For the purpose of this analysis, column type aliases are not considered to -// be different (eg. TEXT and VARCHAR will fit the same scalar type String). -// -// This is used by the UPDATE, INSERT and UPSERT code. -// TODO(mgartner): Remove this once assignment casts are fully supported. -func checkDatumTypeFitsColumnType(col *cat.Column, typ *types.T) { - if typ.Equivalent(col.DatumType()) { - return - } - - colName := string(col.ColName()) - err := pgerror.Newf(pgcode.DatatypeMismatch, - "value type %s doesn't match type %s of column %q", - typ, col.DatumType(), tree.ErrNameString(colName)) - err = errors.WithHint(err, "you will need to rewrite or cast the expression") - panic(err) -} - // addAssignmentCasts builds a projection that wraps columns in srcCols with // assignment casts when necessary so that the resulting columns have types // identical to their target column types. diff --git a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go index f21f3ef9d15c..97d96a18e7dd 100644 --- a/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go +++ b/pkg/sql/opt/optbuilder/mutation_builder_arbiter.go @@ -13,6 +13,7 @@ package optbuilder import ( "fmt" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" @@ -26,8 +27,10 @@ import ( ) // findArbiters returns a set of arbiters for an INSERT ON CONFLICT statement. -// Both unique indexes and unique constraints can be arbiters. This function -// panics if no arbiters are found. +// Both unique indexes and unique constraints can be arbiters. Arbiters may be +// selected either by explicitly passing a named constraint (the ON CONFLICT ON +// CONSTRAINT form) or by passing a list of columns on which to resolve +// conflicts. This function panics if no arbiters are found. // // Arbiter constraints ensure that the columns designated by conflictOrds // reference at most one target row of a UNIQUE index or constraint. Using ANTI @@ -35,17 +38,91 @@ import ( // (otherwise result cardinality could increase). This is also a Postgres // requirement. // +// When inferring arbiters from a list of columns, there are rules about which +// index or constraint may be returned. +// // An arbiter index: // -// 1. Must have lax key columns that match the columns in conflictOrds. +// 1. Must have lax key columns that match the columns in the ON CONFLICT +// clause. // 2. 
If it is a partial index, its predicate must be implied by the -// arbiterPredicate supplied by the user. +// arbiter predicate supplied by the user. // // An arbiter constraint: // -// 1. Must have columns that match the columns in conflictOrds. +// 1. Must have columns that match the columns in the ON CONFLICT clause. // 2. If it is a partial constraint, its predicate must be implied by the -// arbiterPredicate supplied by the user. +// arbiter predicate supplied by the user. +func (mb *mutationBuilder) findArbiters(onConflict *tree.OnConflict) arbiterSet { + if onConflict == nil { + // No on conflict constraint means that we're in the UPSERT case, which should + // use the primary constraint as the arbiter. + primaryOrds := getExplicitPrimaryKeyOrdinals(mb.tab) + return mb.inferArbitersFromConflictOrds(primaryOrds, nil /* arbiterPredicate */) + } else if onConflict.Constraint != "" { + // We have a constraint explicitly named, so we can set the arbiter to use + // it directly. + for i, ic := 0, mb.tab.IndexCount(); i < ic; i++ { + index := mb.tab.Index(i) + if !index.IsUnique() { + continue + } + if index.Name() == onConflict.Constraint { + if _, partial := index.Predicate(); partial { + panic(partialIndexArbiterError(onConflict, mb.tab.Name())) + + } + return makeSingleIndexArbiterSet(mb, i) + } + } + for i, uc := 0, mb.tab.UniqueCount(); i < uc; i++ { + constraint := mb.tab.Unique(i) + if constraint.Name() == string(onConflict.Constraint) { + if _, partial := constraint.Predicate(); partial { + panic(partialIndexArbiterError(onConflict, mb.tab.Name())) + } + return makeSingleUniqueConstraintArbiterSet(mb, i) + } + } + // Found nothing, we have to return an error. + panic(pgerror.Newf(pgcode.UndefinedObject, "constraint %q for table %q does not exist", onConflict.Constraint, mb.tab.Name())) + } + // We have to infer an arbiter set. + var ords util.FastIntSet + for _, name := range onConflict.Columns { + found := false + for i, n := 0, mb.tab.ColumnCount(); i < n; i++ { + tabCol := mb.tab.Column(i) + if tabCol.ColName() == name && !tabCol.IsMutation() && tabCol.Kind() != cat.System { + ords.Add(i) + found = true + break + } + } + + if !found { + panic(colinfo.NewUndefinedColumnError(string(name))) + } + } + return mb.inferArbitersFromConflictOrds(ords, onConflict.ArbiterPredicate) +} + +func partialIndexArbiterError(onConflict *tree.OnConflict, tableName tree.Name) error { + return errors.WithHint( + pgerror.Newf( + pgcode.WrongObjectType, + "unique constraint %q for table %q is partial, so cannot be used as an arbiter via the ON CONSTRAINT syntax", + onConflict.Constraint, + tableName, + ), + "use the ON CONFLICT (columns...) WHERE form to select this partial unique constraint as an arbiter", + ) +} + +// inferArbitersFromConflictOrds is a helper function for findArbiters that +// infers a set of conflict arbiters from a list of column ordinals that a +// user specified in an ON CONFLICT clause. See the comment above findArbiters +// for more information about what arbiters are. // // If conflictOrds is empty then all unique indexes and unique without index // constraints are returned as arbiters. This is required to support a @@ -68,7 +145,7 @@ import ( // found. // 3. Otherwise, returns all partial arbiter indexes and constraints. 
// -func (mb *mutationBuilder) findArbiters( +func (mb *mutationBuilder) inferArbitersFromConflictOrds( conflictOrds util.FastIntSet, arbiterPredicate tree.Expr, ) arbiterSet { arbiters := makeArbiterSet(mb) diff --git a/pkg/sql/opt/optbuilder/scope.go b/pkg/sql/opt/optbuilder/scope.go index dc6c03aa7ed3..1babdca8ac57 100644 --- a/pkg/sql/opt/optbuilder/scope.go +++ b/pkg/sql/opt/optbuilder/scope.go @@ -360,12 +360,15 @@ func (s *scope) makeOrderingChoice() props.OrderingChoice { } // makePhysicalProps constructs physical properties using the columns in the -// scope for presentation and s.ordering for required ordering. +// scope for presentation and s.ordering for required ordering. The distribution +// is determined based on the locality of the gateway node, since data must +// always be returned to the gateway. func (s *scope) makePhysicalProps() *physical.Required { p := &physical.Required{ Presentation: s.makePresentation(), } p.Ordering.FromOrdering(s.ordering) + p.Distribution.FromLocality(s.builder.evalCtx.Locality) return p } diff --git a/pkg/sql/opt/optbuilder/testdata/fk-on-update-cascade b/pkg/sql/opt/optbuilder/testdata/fk-on-update-cascade index 19a854ba08ee..61bd1ae7c7c6 100644 --- a/pkg/sql/opt/optbuilder/testdata/fk-on-update-cascade +++ b/pkg/sql/opt/optbuilder/testdata/fk-on-update-cascade @@ -734,7 +734,7 @@ root # Test a cascade to a child that requires an assignment cast because the # referencing column type is not identical to the referenced column type. exec-ddl -CREATE TABLE parent_assn_cast (p DECIMAL(10, 2) PRIMARY KEY) +CREATE TABLE parent_assn_cast (p DECIMAL(10, 2) PRIMARY KEY, p2 DECIMAL(10, 2) UNIQUE) ---- exec-ddl @@ -750,66 +750,249 @@ UPDATE parent_assn_cast SET p = 1.45 WHERE p > 1 root ├── update parent_assn_cast │ ├── columns: - │ ├── fetch columns: p:4 + │ ├── fetch columns: p:5 p2:6 │ ├── update-mapping: - │ │ └── p_cast:8 => p:1 + │ │ └── p_cast:10 => p:1 │ ├── input binding: &1 │ ├── cascades │ │ └── child_assn_cast_p_fkey │ └── project - │ ├── columns: p_cast:8!null p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ ├── columns: p_cast:10!null p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ ├── project - │ │ ├── columns: p_new:7!null p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ │ ├── columns: p_new:9!null p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ │ ├── select - │ │ │ ├── columns: p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ │ │ ├── columns: p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ │ │ ├── scan parent_assn_cast - │ │ │ │ └── columns: p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ │ │ │ └── columns: p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ │ │ └── filters - │ │ │ └── p:4 > 1 + │ │ │ └── p:5 > 1 │ │ └── projections - │ │ └── 1.45 [as=p_new:7] + │ │ └── 1.45 [as=p_new:9] │ └── projections - │ └── assignment-cast: DECIMAL(10,2) [as=p_cast:8] - │ └── p_new:7 + │ └── assignment-cast: DECIMAL(10,2) [as=p_cast:10] + │ └── p_new:9 └── cascade └── update child_assn_cast ├── columns: - ├── fetch columns: c:13 child_assn_cast.p:14 + ├── fetch columns: c:15 child_assn_cast.p:16 ├── update-mapping: - │ └── p_cast:19 => child_assn_cast.p:10 + │ └── p_cast:21 => child_assn_cast.p:12 ├── input binding: &2 ├── project - │ ├── columns: p_cast:19!null c:13!null child_assn_cast.p:14!null p_old:17!null p_new:18!null + │ ├── columns: p_cast:21!null c:15!null child_assn_cast.p:16!null p_old:19!null p_new:20!null │ ├── inner-join (hash) - │ │ ├── columns: c:13!null 
child_assn_cast.p:14!null p_old:17!null p_new:18!null + │ │ ├── columns: c:15!null child_assn_cast.p:16!null p_old:19!null p_new:20!null │ │ ├── scan child_assn_cast - │ │ │ └── columns: c:13!null child_assn_cast.p:14 + │ │ │ └── columns: c:15!null child_assn_cast.p:16 │ │ ├── select - │ │ │ ├── columns: p_old:17!null p_new:18!null + │ │ │ ├── columns: p_old:19!null p_new:20!null │ │ │ ├── with-scan &1 - │ │ │ │ ├── columns: p_old:17!null p_new:18!null + │ │ │ │ ├── columns: p_old:19!null p_new:20!null │ │ │ │ └── mapping: - │ │ │ │ ├── parent_assn_cast.p:4 => p_old:17 - │ │ │ │ └── p_cast:8 => p_new:18 + │ │ │ │ ├── parent_assn_cast.p:5 => p_old:19 + │ │ │ │ └── p_cast:10 => p_new:20 │ │ │ └── filters - │ │ │ └── p_old:17 IS DISTINCT FROM p_new:18 + │ │ │ └── p_old:19 IS DISTINCT FROM p_new:20 │ │ └── filters - │ │ └── child_assn_cast.p:14 = p_old:17 + │ │ └── child_assn_cast.p:16 = p_old:19 │ └── projections - │ └── assignment-cast: DECIMAL(10) [as=p_cast:19] - │ └── p_new:18 + │ └── assignment-cast: DECIMAL(10) [as=p_cast:21] + │ └── p_new:20 └── f-k-checks └── f-k-checks-item: child_assn_cast(p) -> parent_assn_cast(p) └── anti-join (hash) - ├── columns: p:20!null + ├── columns: p:22!null ├── with-scan &2 - │ ├── columns: p:20!null + │ ├── columns: p:22!null + │ └── mapping: + │ └── p_cast:21 => p:22 + ├── scan parent_assn_cast + │ └── columns: parent_assn_cast.p:23!null + └── filters + └── p:22 = parent_assn_cast.p:23 + +exec-ddl +CREATE TABLE child_assn_cast_p2 ( + c INT PRIMARY KEY, + p2 DECIMAL(10, 0) REFERENCES parent_assn_cast(p2) ON UPDATE CASCADE +) +---- + +build-cascades +UPSERT INTO parent_assn_cast (p, p2) VALUES (1.23, 4.56) +---- +root + ├── upsert parent_assn_cast + │ ├── columns: + │ ├── arbiter indexes: parent_assn_cast_pkey + │ ├── canary column: p:9 + │ ├── fetch columns: p:9 p2:10 + │ ├── insert-mapping: + │ │ ├── p_cast:7 => p:1 + │ │ └── p2_cast:8 => p2:2 + │ ├── update-mapping: + │ │ └── p2_cast:8 => p2:2 + │ ├── input binding: &1 + │ ├── cascades + │ │ └── child_assn_cast_p2_p2_fkey + │ └── project + │ ├── columns: upsert_p:13 p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ ├── left-join (hash) + │ │ ├── columns: p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: p_cast:7!null p2_cast:8!null + │ │ │ ├── grouping columns: p_cast:7!null + │ │ │ ├── project + │ │ │ │ ├── columns: p_cast:7!null p2_cast:8!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ │ │ └── (1.23, 4.56) + │ │ │ │ └── projections + │ │ │ │ ├── assignment-cast: DECIMAL(10,2) [as=p_cast:7] + │ │ │ │ │ └── column1:5 + │ │ │ │ └── assignment-cast: DECIMAL(10,2) [as=p2_cast:8] + │ │ │ │ └── column2:6 + │ │ │ └── aggregations + │ │ │ └── first-agg [as=p2_cast:8] + │ │ │ └── p2_cast:8 + │ │ ├── scan parent_assn_cast + │ │ │ └── columns: p:9!null p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ └── filters + │ │ └── p_cast:7 = p:9 + │ └── projections + │ └── CASE WHEN p:9 IS NULL THEN p_cast:7 ELSE p:9 END [as=upsert_p:13] + └── cascade + └── update child_assn_cast_p2 + ├── columns: + ├── fetch columns: c:18 child_assn_cast_p2.p2:19 + ├── update-mapping: + │ └── p2_cast:24 => child_assn_cast_p2.p2:15 + ├── input binding: &2 + ├── project + │ ├── columns: p2_cast:24!null c:18!null child_assn_cast_p2.p2:19!null p2_old:22!null p2_new:23!null + │ ├── inner-join (hash) + │ │ ├── columns: c:18!null child_assn_cast_p2.p2:19!null 
p2_old:22!null p2_new:23!null + │ │ ├── scan child_assn_cast_p2 + │ │ │ └── columns: c:18!null child_assn_cast_p2.p2:19 + │ │ ├── select + │ │ │ ├── columns: p2_old:22 p2_new:23!null + │ │ │ ├── with-scan &1 + │ │ │ │ ├── columns: p2_old:22 p2_new:23!null + │ │ │ │ └── mapping: + │ │ │ │ ├── parent_assn_cast.p2:10 => p2_old:22 + │ │ │ │ └── p2_cast:8 => p2_new:23 + │ │ │ └── filters + │ │ │ └── p2_old:22 IS DISTINCT FROM p2_new:23 + │ │ └── filters + │ │ └── child_assn_cast_p2.p2:19 = p2_old:22 + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=p2_cast:24] + │ └── p2_new:23 + └── f-k-checks + └── f-k-checks-item: child_assn_cast_p2(p2) -> parent_assn_cast(p2) + └── anti-join (hash) + ├── columns: p2:25!null + ├── with-scan &2 + │ ├── columns: p2:25!null │ └── mapping: - │ └── p_cast:19 => p:20 + │ └── p2_cast:24 => p2:25 ├── scan parent_assn_cast - │ └── columns: parent_assn_cast.p:21!null + │ └── columns: parent_assn_cast.p2:27 └── filters - └── p:20 = parent_assn_cast.p:21 + └── p2:25 = parent_assn_cast.p2:27 + +build-cascades +INSERT INTO parent_assn_cast (p, p2) VALUES (1.23, 4.56) ON CONFLICT (p) DO UPDATE SET p2 = 7.89 +---- +root + ├── upsert parent_assn_cast + │ ├── columns: + │ ├── arbiter indexes: parent_assn_cast_pkey + │ ├── canary column: p:9 + │ ├── fetch columns: p:9 p2:10 + │ ├── insert-mapping: + │ │ ├── p_cast:7 => p:1 + │ │ └── p2_cast:8 => p2:2 + │ ├── update-mapping: + │ │ └── upsert_p2:16 => p2:2 + │ ├── input binding: &1 + │ ├── cascades + │ │ └── child_assn_cast_p2_p2_fkey + │ └── project + │ ├── columns: upsert_p:15 upsert_p2:16!null p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 p2_cast:14!null + │ ├── project + │ │ ├── columns: p2_cast:14!null p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ ├── project + │ │ │ ├── columns: p2_new:13!null p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ │ ├── left-join (hash) + │ │ │ │ ├── columns: p_cast:7!null p2_cast:8!null p:9 p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ ├── columns: p_cast:7!null p2_cast:8!null + │ │ │ │ │ ├── grouping columns: p_cast:7!null + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: p_cast:7!null p2_cast:8!null + │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ │ │ │ │ └── (1.23, 4.56) + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ ├── assignment-cast: DECIMAL(10,2) [as=p_cast:7] + │ │ │ │ │ │ │ └── column1:5 + │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,2) [as=p2_cast:8] + │ │ │ │ │ │ └── column2:6 + │ │ │ │ │ └── aggregations + │ │ │ │ │ └── first-agg [as=p2_cast:8] + │ │ │ │ │ └── p2_cast:8 + │ │ │ │ ├── scan parent_assn_cast + │ │ │ │ │ └── columns: p:9!null p2:10 crdb_internal_mvcc_timestamp:11 tableoid:12 + │ │ │ │ └── filters + │ │ │ │ └── p_cast:7 = p:9 + │ │ │ └── projections + │ │ │ └── 7.89 [as=p2_new:13] + │ │ └── projections + │ │ └── assignment-cast: DECIMAL(10,2) [as=p2_cast:14] + │ │ └── p2_new:13 + │ └── projections + │ ├── CASE WHEN p:9 IS NULL THEN p_cast:7 ELSE p:9 END [as=upsert_p:15] + │ └── CASE WHEN p:9 IS NULL THEN p2_cast:8 ELSE p2_cast:14 END [as=upsert_p2:16] + └── cascade + └── update child_assn_cast_p2 + ├── columns: + ├── fetch columns: c:21 child_assn_cast_p2.p2:22 + ├── update-mapping: + │ └── p2_cast:27 => child_assn_cast_p2.p2:18 + ├── input binding: &2 + ├── project + │ ├── columns: p2_cast:27!null c:21!null child_assn_cast_p2.p2:22!null 
p2_old:25!null p2_new:26!null + │ ├── inner-join (hash) + │ │ ├── columns: c:21!null child_assn_cast_p2.p2:22!null p2_old:25!null p2_new:26!null + │ │ ├── scan child_assn_cast_p2 + │ │ │ └── columns: c:21!null child_assn_cast_p2.p2:22 + │ │ ├── select + │ │ │ ├── columns: p2_old:25 p2_new:26!null + │ │ │ ├── with-scan &1 + │ │ │ │ ├── columns: p2_old:25 p2_new:26!null + │ │ │ │ └── mapping: + │ │ │ │ ├── parent_assn_cast.p2:10 => p2_old:25 + │ │ │ │ └── upsert_p2:16 => p2_new:26 + │ │ │ └── filters + │ │ │ └── p2_old:25 IS DISTINCT FROM p2_new:26 + │ │ └── filters + │ │ └── child_assn_cast_p2.p2:22 = p2_old:25 + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=p2_cast:27] + │ └── p2_new:26 + └── f-k-checks + └── f-k-checks-item: child_assn_cast_p2(p2) -> parent_assn_cast(p2) + └── anti-join (hash) + ├── columns: p2:28!null + ├── with-scan &2 + │ ├── columns: p2:28!null + │ └── mapping: + │ └── p2_cast:27 => p2:28 + ├── scan parent_assn_cast + │ └── columns: parent_assn_cast.p2:30 + └── filters + └── p2:28 = parent_assn_cast.p2:30 # Test a cascade to a child with a partial index. exec-ddl @@ -1403,3 +1586,91 @@ root └── eq [type=bool, outer=(19,20), constraints=(/19: (/NULL - ]; /20: (/NULL - ]), fd=(19)==(20), (20)==(19)] ├── variable: p:19 [type=int2] └── variable: t.public.parent_diff_type.p:20 [type=int] + +build-cascades +UPSERT INTO parent_diff_type VALUES (0) +---- +root + └── upsert parent_diff_type + ├── columns: + ├── upsert-mapping: + │ └── column1:4 => p:1 + └── values + ├── columns: column1:4!null + └── (0,) + +build-cascades +INSERT INTO parent_diff_type VALUES (0) ON CONFLICT (p) DO UPDATE SET p = 1 +---- +root + ├── upsert parent_diff_type + │ ├── columns: + │ ├── arbiter indexes: parent_diff_type_pkey + │ ├── canary column: p:5 + │ ├── fetch columns: p:5 + │ ├── insert-mapping: + │ │ └── column1:4 => p:1 + │ ├── update-mapping: + │ │ └── upsert_p:9 => p:1 + │ ├── input binding: &1 + │ ├── cascades + │ │ └── child_diff_type_p_fkey + │ └── project + │ ├── columns: upsert_p:9!null column1:4!null p:5 crdb_internal_mvcc_timestamp:6 tableoid:7 p_new:8!null + │ ├── project + │ │ ├── columns: p_new:8!null column1:4!null p:5 crdb_internal_mvcc_timestamp:6 tableoid:7 + │ │ ├── left-join (hash) + │ │ │ ├── columns: column1:4!null p:5 crdb_internal_mvcc_timestamp:6 tableoid:7 + │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ ├── columns: column1:4!null + │ │ │ │ ├── grouping columns: column1:4!null + │ │ │ │ └── values + │ │ │ │ ├── columns: column1:4!null + │ │ │ │ └── (0,) + │ │ │ ├── scan parent_diff_type + │ │ │ │ └── columns: p:5!null crdb_internal_mvcc_timestamp:6 tableoid:7 + │ │ │ └── filters + │ │ │ └── column1:4 = p:5 + │ │ └── projections + │ │ └── 1 [as=p_new:8] + │ └── projections + │ └── CASE WHEN p:5 IS NULL THEN column1:4 ELSE p_new:8 END [as=upsert_p:9] + └── cascade + └── update child_diff_type + ├── columns: + ├── fetch columns: c:14 child_diff_type.p:15 + ├── update-mapping: + │ └── p_cast:20 => child_diff_type.p:11 + ├── input binding: &2 + ├── project + │ ├── columns: p_cast:20!null c:14!null child_diff_type.p:15!null p_old:18!null p_new:19!null + │ ├── inner-join (hash) + │ │ ├── columns: c:14!null child_diff_type.p:15!null p_old:18!null p_new:19!null + │ │ ├── scan child_diff_type + │ │ │ └── columns: c:14!null child_diff_type.p:15 + │ │ ├── select + │ │ │ ├── columns: p_old:18 p_new:19!null + │ │ │ ├── with-scan &1 + │ │ │ │ ├── columns: p_old:18 p_new:19!null + │ │ │ │ └── mapping: + │ │ │ │ ├── parent_diff_type.p:5 => p_old:18 + │ │ │ │ └── upsert_p:9 
=> p_new:19 + │ │ │ └── filters + │ │ │ └── p_old:18 IS DISTINCT FROM p_new:19 + │ │ └── filters + │ │ └── child_diff_type.p:15 = p_old:18 + │ └── projections + │ └── assignment-cast: INT2 [as=p_cast:20] + │ └── p_new:19 + └── f-k-checks + └── f-k-checks-item: child_diff_type(p) -> parent_diff_type(p) + └── anti-join (hash) + ├── columns: p:21!null + ├── with-scan &2 + │ ├── columns: p:21!null + │ └── mapping: + │ └── p_cast:20 => p:21 + ├── scan parent_diff_type + │ └── columns: parent_diff_type.p:22!null + └── filters + └── p:21 = parent_diff_type.p:22 diff --git a/pkg/sql/opt/optbuilder/testdata/fk-on-update-set-default b/pkg/sql/opt/optbuilder/testdata/fk-on-update-set-default index d1b6ee113f6f..6a32ed363ebf 100644 --- a/pkg/sql/opt/optbuilder/testdata/fk-on-update-set-default +++ b/pkg/sql/opt/optbuilder/testdata/fk-on-update-set-default @@ -767,7 +767,7 @@ root # referencing column's DEFAULT expression type is not identical to the # referencing column type. exec-ddl -CREATE TABLE parent_assn_cast (p INT PRIMARY KEY) +CREATE TABLE parent_assn_cast (p INT PRIMARY KEY, p2 INT UNIQUE) ---- exec-ddl @@ -783,65 +783,237 @@ UPDATE parent_assn_cast SET p = 1 WHERE p > 1 root ├── update parent_assn_cast │ ├── columns: - │ ├── fetch columns: p:4 + │ ├── fetch columns: p:5 p2:6 │ ├── update-mapping: - │ │ └── p_new:7 => p:1 + │ │ └── p_new:9 => p:1 │ ├── input binding: &1 │ ├── cascades │ │ └── child_assn_cast_p_fkey │ └── project - │ ├── columns: p_new:7!null p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ ├── columns: p_new:9!null p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ ├── select - │ │ ├── columns: p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ │ ├── columns: p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ │ ├── scan parent_assn_cast - │ │ │ └── columns: p:4!null crdb_internal_mvcc_timestamp:5 tableoid:6 + │ │ │ └── columns: p:5!null p2:6 crdb_internal_mvcc_timestamp:7 tableoid:8 │ │ └── filters - │ │ └── p:4 > 1 + │ │ └── p:5 > 1 │ └── projections - │ └── 1 [as=p_new:7] + │ └── 1 [as=p_new:9] └── cascade └── update child_assn_cast ├── columns: - ├── fetch columns: c:12 child_assn_cast.p:13 + ├── fetch columns: c:14 child_assn_cast.p:15 ├── update-mapping: - │ └── p_cast:19 => child_assn_cast.p:9 + │ └── p_cast:21 => child_assn_cast.p:11 ├── input binding: &2 ├── project - │ ├── columns: p_cast:19!null c:12!null child_assn_cast.p:13!null p_old:16!null p_new:17!null + │ ├── columns: p_cast:21!null c:14!null child_assn_cast.p:15!null p_old:18!null p_new:19!null │ ├── project - │ │ ├── columns: p_new:18!null c:12!null child_assn_cast.p:13!null p_old:16!null p_new:17!null + │ │ ├── columns: p_new:20!null c:14!null child_assn_cast.p:15!null p_old:18!null p_new:19!null │ │ ├── inner-join (cross) - │ │ │ ├── columns: c:12!null child_assn_cast.p:13!null p_old:16!null p_new:17!null + │ │ │ ├── columns: c:14!null child_assn_cast.p:15!null p_old:18!null p_new:19!null │ │ │ ├── scan child_assn_cast - │ │ │ │ └── columns: c:12!null child_assn_cast.p:13 + │ │ │ │ └── columns: c:14!null child_assn_cast.p:15 │ │ │ ├── select - │ │ │ │ ├── columns: p_old:16!null p_new:17!null + │ │ │ │ ├── columns: p_old:18!null p_new:19!null │ │ │ │ ├── with-scan &1 - │ │ │ │ │ ├── columns: p_old:16!null p_new:17!null + │ │ │ │ │ ├── columns: p_old:18!null p_new:19!null │ │ │ │ │ └── mapping: - │ │ │ │ │ ├── parent_assn_cast.p:4 => p_old:16 - │ │ │ │ │ └── p_new:7 => p_new:17 + │ │ │ │ │ ├── parent_assn_cast.p:5 => p_old:18 + │ │ │ │ │ └── p_new:9 => p_new:19 │ │ │ │ └── filters - 
│ │ │ │ └── p_old:16 IS DISTINCT FROM p_new:17 + │ │ │ │ └── p_old:18 IS DISTINCT FROM p_new:19 │ │ │ └── filters - │ │ │ └── child_assn_cast.p:13 = p_old:16 + │ │ │ └── child_assn_cast.p:15 = p_old:18 │ │ └── projections - │ │ └── 1.45::DECIMAL(10,2) [as=p_new:18] + │ │ └── 1.45::DECIMAL(10,2) [as=p_new:20] │ └── projections - │ └── assignment-cast: DECIMAL(10) [as=p_cast:19] - │ └── p_new:18 + │ └── assignment-cast: DECIMAL(10) [as=p_cast:21] + │ └── p_new:20 └── f-k-checks └── f-k-checks-item: child_assn_cast(p) -> parent_assn_cast(p) └── anti-join (cross) - ├── columns: p:20!null + ├── columns: p:22!null + ├── with-scan &2 + │ ├── columns: p:22!null + │ └── mapping: + │ └── p_cast:21 => p:22 + ├── scan parent_assn_cast + │ └── columns: parent_assn_cast.p:23!null + └── filters + └── p:22 = parent_assn_cast.p:23 + +exec-ddl +CREATE TABLE child_assn_cast_p2 ( + c INT PRIMARY KEY, + p2 DECIMAL(10, 0) DEFAULT 1.45::DECIMAL(10, 2) REFERENCES parent_assn_cast(p2) ON UPDATE SET DEFAULT +) +---- + +build-cascades +UPSERT INTO parent_assn_cast (p, p2) VALUES (1, 2) +---- +root + ├── upsert parent_assn_cast + │ ├── columns: + │ ├── arbiter indexes: parent_assn_cast_pkey + │ ├── canary column: p:7 + │ ├── fetch columns: p:7 p2:8 + │ ├── insert-mapping: + │ │ ├── column1:5 => p:1 + │ │ └── column2:6 => p2:2 + │ ├── update-mapping: + │ │ └── column2:6 => p2:2 + │ ├── input binding: &1 + │ ├── cascades + │ │ └── child_assn_cast_p2_p2_fkey + │ └── project + │ ├── columns: upsert_p:11 column1:5!null column2:6!null p:7 p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ ├── left-join (hash) + │ │ ├── columns: column1:5!null column2:6!null p:7 p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ ├── grouping columns: column1:5!null + │ │ │ ├── values + │ │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ │ └── (1, 2) + │ │ │ └── aggregations + │ │ │ └── first-agg [as=column2:6] + │ │ │ └── column2:6 + │ │ ├── scan parent_assn_cast + │ │ │ └── columns: p:7!null p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ │ └── filters + │ │ └── column1:5 = p:7 + │ └── projections + │ └── CASE WHEN p:7 IS NULL THEN column1:5 ELSE p:7 END [as=upsert_p:11] + └── cascade + └── update child_assn_cast_p2 + ├── columns: + ├── fetch columns: c:16 child_assn_cast_p2.p2:17 + ├── update-mapping: + │ └── p2_cast:23 => child_assn_cast_p2.p2:13 + ├── input binding: &2 + ├── project + │ ├── columns: p2_cast:23!null c:16!null child_assn_cast_p2.p2:17!null p2_old:20!null p2_new:21!null + │ ├── project + │ │ ├── columns: p2_new:22!null c:16!null child_assn_cast_p2.p2:17!null p2_old:20!null p2_new:21!null + │ │ ├── inner-join (cross) + │ │ │ ├── columns: c:16!null child_assn_cast_p2.p2:17!null p2_old:20!null p2_new:21!null + │ │ │ ├── scan child_assn_cast_p2 + │ │ │ │ └── columns: c:16!null child_assn_cast_p2.p2:17 + │ │ │ ├── select + │ │ │ │ ├── columns: p2_old:20 p2_new:21!null + │ │ │ │ ├── with-scan &1 + │ │ │ │ │ ├── columns: p2_old:20 p2_new:21!null + │ │ │ │ │ └── mapping: + │ │ │ │ │ ├── parent_assn_cast.p2:8 => p2_old:20 + │ │ │ │ │ └── column2:6 => p2_new:21 + │ │ │ │ └── filters + │ │ │ │ └── p2_old:20 IS DISTINCT FROM p2_new:21 + │ │ │ └── filters + │ │ │ └── child_assn_cast_p2.p2:17 = p2_old:20 + │ │ └── projections + │ │ └── 1.45::DECIMAL(10,2) [as=p2_new:22] + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=p2_cast:23] + │ └── p2_new:22 + └── f-k-checks + └── f-k-checks-item: child_assn_cast_p2(p2) -> 
parent_assn_cast(p2) + └── anti-join (cross) + ├── columns: p2:24!null + ├── with-scan &2 + │ ├── columns: p2:24!null + │ └── mapping: + │ └── p2_cast:23 => p2:24 + ├── scan parent_assn_cast + │ └── columns: parent_assn_cast.p2:26 + └── filters + └── p2:24 = parent_assn_cast.p2:26 + +build-cascades +INSERT INTO parent_assn_cast (p, p2) VALUES (1, 2) ON CONFLICT (p) DO UPDATE SET p2 = 3 +---- +root + ├── upsert parent_assn_cast + │ ├── columns: + │ ├── arbiter indexes: parent_assn_cast_pkey + │ ├── canary column: p:7 + │ ├── fetch columns: p:7 p2:8 + │ ├── insert-mapping: + │ │ ├── column1:5 => p:1 + │ │ └── column2:6 => p2:2 + │ ├── update-mapping: + │ │ └── upsert_p2:13 => p2:2 + │ ├── input binding: &1 + │ ├── cascades + │ │ └── child_assn_cast_p2_p2_fkey + │ └── project + │ ├── columns: upsert_p:12 upsert_p2:13!null column1:5!null column2:6!null p:7 p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 p2_new:11!null + │ ├── project + │ │ ├── columns: p2_new:11!null column1:5!null column2:6!null p:7 p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ │ ├── left-join (hash) + │ │ │ ├── columns: column1:5!null column2:6!null p:7 p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ │ ├── grouping columns: column1:5!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:5!null column2:6!null + │ │ │ │ │ └── (1, 2) + │ │ │ │ └── aggregations + │ │ │ │ └── first-agg [as=column2:6] + │ │ │ │ └── column2:6 + │ │ │ ├── scan parent_assn_cast + │ │ │ │ └── columns: p:7!null p2:8 crdb_internal_mvcc_timestamp:9 tableoid:10 + │ │ │ └── filters + │ │ │ └── column1:5 = p:7 + │ │ └── projections + │ │ └── 3 [as=p2_new:11] + │ └── projections + │ ├── CASE WHEN p:7 IS NULL THEN column1:5 ELSE p:7 END [as=upsert_p:12] + │ └── CASE WHEN p:7 IS NULL THEN column2:6 ELSE p2_new:11 END [as=upsert_p2:13] + └── cascade + └── update child_assn_cast_p2 + ├── columns: + ├── fetch columns: c:18 child_assn_cast_p2.p2:19 + ├── update-mapping: + │ └── p2_cast:25 => child_assn_cast_p2.p2:15 + ├── input binding: &2 + ├── project + │ ├── columns: p2_cast:25!null c:18!null child_assn_cast_p2.p2:19!null p2_old:22!null p2_new:23!null + │ ├── project + │ │ ├── columns: p2_new:24!null c:18!null child_assn_cast_p2.p2:19!null p2_old:22!null p2_new:23!null + │ │ ├── inner-join (cross) + │ │ │ ├── columns: c:18!null child_assn_cast_p2.p2:19!null p2_old:22!null p2_new:23!null + │ │ │ ├── scan child_assn_cast_p2 + │ │ │ │ └── columns: c:18!null child_assn_cast_p2.p2:19 + │ │ │ ├── select + │ │ │ │ ├── columns: p2_old:22 p2_new:23!null + │ │ │ │ ├── with-scan &1 + │ │ │ │ │ ├── columns: p2_old:22 p2_new:23!null + │ │ │ │ │ └── mapping: + │ │ │ │ │ ├── parent_assn_cast.p2:8 => p2_old:22 + │ │ │ │ │ └── upsert_p2:13 => p2_new:23 + │ │ │ │ └── filters + │ │ │ │ └── p2_old:22 IS DISTINCT FROM p2_new:23 + │ │ │ └── filters + │ │ │ └── child_assn_cast_p2.p2:19 = p2_old:22 + │ │ └── projections + │ │ └── 1.45::DECIMAL(10,2) [as=p2_new:24] + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=p2_cast:25] + │ └── p2_new:24 + └── f-k-checks + └── f-k-checks-item: child_assn_cast_p2(p2) -> parent_assn_cast(p2) + └── anti-join (cross) + ├── columns: p2:26!null ├── with-scan &2 - │ ├── columns: p:20!null + │ ├── columns: p2:26!null │ └── mapping: - │ └── p_cast:19 => p:20 + │ └── p2_cast:25 => p2:26 ├── scan parent_assn_cast - │ └── columns: parent_assn_cast.p:21!null + │ └── columns: parent_assn_cast.p2:28 └── filters - └── p:20 = 
parent_assn_cast.p:21 + └── p2:26 = parent_assn_cast.p2:28 # Test a cascade to a child with a partial index. exec-ddl diff --git a/pkg/sql/opt/optbuilder/testdata/partial-indexes b/pkg/sql/opt/optbuilder/testdata/partial-indexes index b640c273f832..d13bb4adf099 100644 --- a/pkg/sql/opt/optbuilder/testdata/partial-indexes +++ b/pkg/sql/opt/optbuilder/testdata/partial-indexes @@ -1751,6 +1751,12 @@ INSERT INTO comp VALUES (1, 1, 'bar') ON CONFLICT (b) WHERE d = 'foo' AND e = 'b ---- error (0A000): unimplemented: there are multiple unique or exclusion constraints matching the ON CONFLICT specification +# Error when a partial arbiter index is explicitly specified, which is not allowed. +build +INSERT INTO comp VALUES (1, 1, 'bar') ON CONFLICT ON CONSTRAINT u1 DO UPDATE SET b = 10 +---- +error (42809): unique constraint "u1" for table "comp" is partial, so cannot be used as an arbiter via the ON CONSTRAINT syntax + build INSERT INTO comp VALUES (1, 1, 'bar') ON CONFLICT (b) WHERE d = 'foo' DO UPDATE SET b = 10 ---- diff --git a/pkg/sql/opt/optbuilder/testdata/unique-checks-insert b/pkg/sql/opt/optbuilder/testdata/unique-checks-insert index 741bca8cc181..af55d949ec23 100644 --- a/pkg/sql/opt/optbuilder/testdata/unique-checks-insert +++ b/pkg/sql/opt/optbuilder/testdata/unique-checks-insert @@ -397,6 +397,62 @@ insert uniq ├── y:31 = uniq.y:24 └── k:27 != uniq.k:20 +# On conflict clause references unique without index constraint explicitly. +build +INSERT INTO uniq VALUES (1, 2, 3, 4, 5) ON CONFLICT ON CONSTRAINT unique_w DO NOTHING +---- +insert uniq + ├── columns: + ├── arbiter constraints: unique_w + ├── insert-mapping: + │ ├── column1:8 => uniq.k:1 + │ ├── column2:9 => uniq.v:2 + │ ├── column3:10 => uniq.w:3 + │ ├── column4:11 => uniq.x:4 + │ └── column5:12 => uniq.y:5 + ├── input binding: &1 + ├── upsert-distinct-on + │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null + │ ├── grouping columns: column3:10!null + │ ├── anti-join (hash) + │ │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null + │ │ ├── values + │ │ │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null + │ │ │ └── (1, 2, 3, 4, 5) + │ │ ├── scan uniq + │ │ │ └── columns: uniq.k:13!null uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 + │ │ └── filters + │ │ └── column3:10 = uniq.w:15 + │ └── aggregations + │ ├── first-agg [as=column1:8] + │ │ └── column1:8 + │ ├── first-agg [as=column2:9] + │ │ └── column2:9 + │ ├── first-agg [as=column4:11] + │ │ └── column4:11 + │ └── first-agg [as=column5:12] + │ └── column5:12 + └── unique-checks + └── unique-checks-item: uniq(x,y) + └── project + ├── columns: x:30!null y:31!null + └── semi-join (hash) + ├── columns: k:27!null v:28!null w:29!null x:30!null y:31!null + ├── with-scan &1 + │ ├── columns: k:27!null v:28!null w:29!null x:30!null y:31!null + │ └── mapping: + │ ├── column1:8 => k:27 + │ ├── column2:9 => v:28 + │ ├── column3:10 => w:29 + │ ├── column4:11 => x:30 + │ └── column5:12 => y:31 + ├── scan uniq + │ └── columns: uniq.k:20!null uniq.v:21 uniq.w:22 uniq.x:23 uniq.y:24 + └── filters + ├── x:30 = uniq.x:23 + ├── y:31 = uniq.y:24 + └── k:27 != uniq.k:20 + exec-ddl CREATE TABLE other (k INT, v INT, w INT NOT NULL, x INT, y INT) ---- @@ -952,6 +1008,13 @@ INSERT INTO uniq_partial VALUES (1, 2, 3) ON CONFLICT (a) DO NOTHING ---- error (42P10): there is no unique or exclusion constraint matching the ON CONFLICT specification +# Error when trying to select a partial unique 
without index constraint +# explicitly, which is not allowed. +build +INSERT INTO uniq_partial VALUES (1, 2, 3) ON CONFLICT ON CONSTRAINT unique_a DO NOTHING +---- +error (42809): unique constraint "unique_a" for table "uniq_partial" is partial, so cannot be used as an arbiter via the ON CONSTRAINT syntax + # On conflict clause references unique without index constraint. build INSERT INTO uniq_partial VALUES (1, 2, 3) ON CONFLICT (a) WHERE b > 0 DO NOTHING diff --git a/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert b/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert index 8b2228861c4d..f369d03e1f0f 100644 --- a/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert +++ b/pkg/sql/opt/optbuilder/testdata/unique-checks-upsert @@ -1605,6 +1605,78 @@ upsert t ├── i:20 = t.i:16 └── rowid:21 != t.rowid:17 +# On conflict clause references unique without index constraint explicitly. +build +INSERT INTO uniq VALUES (1, 2, 3, 4, 5) ON CONFLICT ON CONSTRAINT unique_w DO UPDATE SET v=1 +---- +upsert uniq + ├── columns: + ├── arbiter constraints: unique_w + ├── canary column: uniq.k:13 + ├── fetch columns: uniq.k:13 uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 + ├── insert-mapping: + │ ├── column1:8 => uniq.k:1 + │ ├── column2:9 => uniq.v:2 + │ ├── column3:10 => uniq.w:3 + │ ├── column4:11 => uniq.x:4 + │ └── column5:12 => uniq.y:5 + ├── update-mapping: + │ └── upsert_v:22 => uniq.v:2 + ├── input binding: &1 + ├── project + │ ├── columns: upsert_k:21 upsert_v:22!null upsert_w:23 upsert_x:24 upsert_y:25 column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null uniq.k:13 uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 crdb_internal_mvcc_timestamp:18 tableoid:19 v_new:20!null + │ ├── project + │ │ ├── columns: v_new:20!null column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null uniq.k:13 uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ ├── left-join (hash) + │ │ │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null uniq.k:13 uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null + │ │ │ │ ├── grouping columns: column3:10!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:8!null column2:9!null column3:10!null column4:11!null column5:12!null + │ │ │ │ │ └── (1, 2, 3, 4, 5) + │ │ │ │ └── aggregations + │ │ │ │ ├── first-agg [as=column1:8] + │ │ │ │ │ └── column1:8 + │ │ │ │ ├── first-agg [as=column2:9] + │ │ │ │ │ └── column2:9 + │ │ │ │ ├── first-agg [as=column4:11] + │ │ │ │ │ └── column4:11 + │ │ │ │ └── first-agg [as=column5:12] + │ │ │ │ └── column5:12 + │ │ │ ├── scan uniq + │ │ │ │ └── columns: uniq.k:13!null uniq.v:14 uniq.w:15 uniq.x:16 uniq.y:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ │ └── filters + │ │ │ └── column3:10 = uniq.w:15 + │ │ └── projections + │ │ └── 1 [as=v_new:20] + │ └── projections + │ ├── CASE WHEN uniq.k:13 IS NULL THEN column1:8 ELSE uniq.k:13 END [as=upsert_k:21] + │ ├── CASE WHEN uniq.k:13 IS NULL THEN column2:9 ELSE v_new:20 END [as=upsert_v:22] + │ ├── CASE WHEN uniq.k:13 IS NULL THEN column3:10 ELSE uniq.w:15 END [as=upsert_w:23] + │ ├── CASE WHEN uniq.k:13 IS NULL THEN column4:11 ELSE uniq.x:16 END [as=upsert_x:24] + │ └── CASE WHEN uniq.k:13 IS NULL THEN column5:12 ELSE uniq.y:17 END [as=upsert_y:25] + └── unique-checks + └── unique-checks-item: uniq(x,y) + └── project + ├── 
columns: x:36 y:37 + └── semi-join (hash) + ├── columns: k:33 v:34!null w:35 x:36 y:37 + ├── with-scan &1 + │ ├── columns: k:33 v:34!null w:35 x:36 y:37 + │ └── mapping: + │ ├── upsert_k:21 => k:33 + │ ├── upsert_v:22 => v:34 + │ ├── upsert_w:23 => w:35 + │ ├── upsert_x:24 => x:36 + │ └── upsert_y:25 => y:37 + ├── scan uniq + │ └── columns: uniq.k:26!null uniq.v:27 uniq.w:28 uniq.x:29 uniq.y:30 + └── filters + ├── x:36 = uniq.x:29 + ├── y:37 = uniq.y:30 + └── k:33 != uniq.k:26 + exec-ddl CREATE TABLE uniq_partial ( k INT PRIMARY KEY, @@ -1782,6 +1854,13 @@ INSERT INTO uniq_partial VALUES (1, 1, 1) ON CONFLICT (a) DO UPDATE SET a = 2 ---- error (42P10): there is no unique or exclusion constraint matching the ON CONFLICT specification +# Error when trying to select a partial unique without index constraint +# explicitly, which is not allowed. +build +INSERT INTO uniq_partial VALUES (1, 2, 3) ON CONFLICT ON CONSTRAINT unique_a DO UPDATE SET a = 2 +---- +error (42809): unique constraint "unique_a" for table "uniq_partial" is partial, so cannot be used as an arbiter via the ON CONSTRAINT syntax + exec-ddl CREATE TABLE uniq_partial_constraint_and_index ( k INT PRIMARY KEY, diff --git a/pkg/sql/opt/optbuilder/testdata/upsert b/pkg/sql/opt/optbuilder/testdata/upsert index 592995c88ee0..97ad9f4aefc2 100644 --- a/pkg/sql/opt/optbuilder/testdata/upsert +++ b/pkg/sql/opt/optbuilder/testdata/upsert @@ -88,6 +88,28 @@ CREATE TABLE uniq ( ) ---- +exec-ddl +CREATE TABLE assn_cast ( + k INT PRIMARY KEY, + c CHAR, + qc "char", + i INT DEFAULT 10::INT2, + s STRING, + d DECIMAL(10, 0), + d_comp DECIMAL(10, 0) AS (d + 10.0) STORED +) +---- + +exec-ddl +CREATE TABLE assn_cast_on_update ( + k INT PRIMARY KEY, + i INT, + d DECIMAL(10, 1) ON UPDATE 1.23, + d2 DECIMAL(10, 1) ON UPDATE 1.23::DECIMAL(10, 2), + d_comp DECIMAL(10, 0) AS (d) STORED +) +---- + # ------------------------------------------------------------------------------ # Basic tests. # ------------------------------------------------------------------------------ @@ -1694,7 +1716,7 @@ upsert xyz └── CASE WHEN x:12 IS NULL THEN c:8 ELSE z:14 END [as=upsert_z:20] # ------------------------------------------------------------------------------ -# Test decimal column truncation. +# Test assignment casts. # ------------------------------------------------------------------------------ # Fast UPSERT case. 
@@ -1704,71 +1726,78 @@ UPSERT INTO decimals (a, b) VALUES (1.1, ARRAY[0.95]) upsert decimals ├── columns: ├── arbiter indexes: decimals_pkey - ├── canary column: decimals.a:15 - ├── fetch columns: decimals.a:15 decimals.b:16 c:17 d:18 + ├── canary column: a:15 + ├── fetch columns: a:15 b:16 c:17 d:18 ├── insert-mapping: - │ ├── a:10 => decimals.a:1 - │ ├── b:11 => decimals.b:2 - │ ├── c_default:12 => c:3 - │ └── d_comp:14 => d:4 + │ ├── a_cast:9 => a:1 + │ ├── b_cast:10 => b:2 + │ ├── c_cast:12 => c:3 + │ └── d_cast:14 => d:4 ├── update-mapping: - │ └── b:11 => decimals.b:2 + │ └── b_cast:10 => b:2 ├── check columns: check1:25 check2:26 └── project - ├── columns: check1:25 check2:26 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 upsert_a:22 upsert_c:23 upsert_d:24 + ├── columns: check1:25 check2:26 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 upsert_a:22 upsert_c:23 upsert_d:24 ├── project - │ ├── columns: upsert_a:22 upsert_c:23 upsert_d:24 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 + │ ├── columns: upsert_a:22 upsert_c:23 upsert_d:24 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 │ ├── project - │ │ ├── columns: d_comp:21 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ ├── columns: d_comp:21 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ ├── left-join (hash) - │ │ │ ├── columns: a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ ├── columns: a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ ├── ensure-upsert-distinct-on - │ │ │ │ ├── columns: a:10 b:11 c_default:12 d_comp:14 - │ │ │ │ ├── grouping columns: a:10 + │ │ │ │ ├── columns: a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null + │ │ │ │ ├── grouping columns: a_cast:9!null │ │ │ │ ├── project - │ │ │ │ │ ├── columns: d_comp:14 a:10 b:11 c_default:12 + │ │ │ │ │ ├── columns: d_cast:14!null a_cast:9!null b_cast:10 c_cast:12!null │ │ │ │ │ ├── project - │ │ │ │ │ │ ├── columns: d_comp:13 a:10 b:11 c_default:12 + │ │ │ │ │ │ ├── columns: d_comp:13!null a_cast:9!null b_cast:10 c_cast:12!null │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ ├── columns: a:10 b:11 c_default:12 + │ │ │ │ │ │ │ ├── columns: c_cast:12!null a_cast:9!null b_cast:10 │ │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ │ ├── columns: c_default:9!null column1:7!null column2:8 - │ │ │ │ │ │ │ │ ├── values - │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null column2:8 - │ │ │ │ │ │ │ │ │ └── (1.1, ARRAY[0.95]) + │ │ │ │ │ │ │ │ ├── columns: c_default:11!null a_cast:9!null b_cast:10 + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10 + │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null column2:8 + │ │ │ │ │ │ │ │ │ │ └── (1.1, ARRAY[0.95]) + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ ├── assignment-cast: DECIMAL(10) [as=a_cast:9] + │ │ │ │ │ │ │ │ │ │ └── column1:7 + │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(5,1)[] [as=b_cast:10] + │ │ │ │ │ │ │ │ │ └── column2:8 │ │ │ │ │ │ │ │ └── projections - │ │ │ 
│ │ │ │ │ └── 1.23 [as=c_default:9] + │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:11] │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(column1:7, 0) [as=a:10] - │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(column2:8, 1) [as=b:11] - │ │ │ │ │ │ │ └── crdb_internal.round_decimal_values(c_default:9, 1) [as=c_default:12] + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=c_cast:12] + │ │ │ │ │ │ │ └── c_default:11 │ │ │ │ │ │ └── projections - │ │ │ │ │ │ └── a:10::DECIMAL + c_default:12::DECIMAL [as=d_comp:13] + │ │ │ │ │ │ └── a_cast:9::DECIMAL + c_cast:12::DECIMAL [as=d_comp:13] │ │ │ │ │ └── projections - │ │ │ │ │ └── crdb_internal.round_decimal_values(d_comp:13, 1) [as=d_comp:14] + │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=d_cast:14] + │ │ │ │ │ └── d_comp:13 │ │ │ │ └── aggregations - │ │ │ │ ├── first-agg [as=b:11] - │ │ │ │ │ └── b:11 - │ │ │ │ ├── first-agg [as=c_default:12] - │ │ │ │ │ └── c_default:12 - │ │ │ │ └── first-agg [as=d_comp:14] - │ │ │ │ └── d_comp:14 + │ │ │ │ ├── first-agg [as=b_cast:10] + │ │ │ │ │ └── b_cast:10 + │ │ │ │ ├── first-agg [as=c_cast:12] + │ │ │ │ │ └── c_cast:12 + │ │ │ │ └── first-agg [as=d_cast:14] + │ │ │ │ └── d_cast:14 │ │ │ ├── scan decimals - │ │ │ │ ├── columns: decimals.a:15!null decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ ├── columns: a:15!null b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ │ └── computed column expressions │ │ │ │ └── d:18 - │ │ │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL + │ │ │ │ └── a:15::DECIMAL + c:17::DECIMAL │ │ │ └── filters - │ │ │ └── a:10 = decimals.a:15 + │ │ │ └── a_cast:9 = a:15 │ │ └── projections - │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL [as=d_comp:21] + │ │ └── a:15::DECIMAL + c:17::DECIMAL [as=d_comp:21] │ └── projections - │ ├── CASE WHEN decimals.a:15 IS NULL THEN a:10 ELSE decimals.a:15 END [as=upsert_a:22] - │ ├── CASE WHEN decimals.a:15 IS NULL THEN c_default:12 ELSE c:17 END [as=upsert_c:23] - │ └── CASE WHEN decimals.a:15 IS NULL THEN d_comp:14 ELSE d:18 END [as=upsert_d:24] + │ ├── CASE WHEN a:15 IS NULL THEN a_cast:9 ELSE a:15 END [as=upsert_a:22] + │ ├── CASE WHEN a:15 IS NULL THEN c_cast:12 ELSE c:17 END [as=upsert_c:23] + │ └── CASE WHEN a:15 IS NULL THEN d_cast:14 ELSE d:18 END [as=upsert_d:24] └── projections ├── round(upsert_a:22) = upsert_a:22 [as=check1:25] - └── b:11[0] > 1 [as=check2:26] + └── b_cast:10[0] > 1 [as=check2:26] # Regular UPSERT case. 
build @@ -1777,71 +1806,154 @@ UPSERT INTO decimals (a) VALUES (1.1) upsert decimals ├── columns: ├── arbiter indexes: decimals_pkey - ├── canary column: decimals.a:15 - ├── fetch columns: decimals.a:15 b:16 c:17 d:18 + ├── canary column: a:14 + ├── fetch columns: a:14 b:15 c:16 d:17 ├── insert-mapping: - │ ├── a:10 => decimals.a:1 - │ ├── b_default:11 => b:2 - │ ├── c_default:12 => c:3 - │ └── d_comp:14 => d:4 - ├── check columns: check1:26 check2:27 + │ ├── a_cast:8 => a:1 + │ ├── b_default:9 => b:2 + │ ├── c_cast:11 => c:3 + │ └── d_cast:13 => d:4 + ├── check columns: check1:25 check2:26 └── project - ├── columns: check1:26 check2:27 a:10 b_default:11 c_default:12 d_comp:14 decimals.a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 upsert_a:22 upsert_b:23 upsert_c:24 upsert_d:25 + ├── columns: check1:25 check2:26 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 d_comp:20 upsert_a:21 upsert_b:22 upsert_c:23 upsert_d:24 ├── project - │ ├── columns: upsert_a:22 upsert_b:23 upsert_c:24 upsert_d:25 a:10 b_default:11 c_default:12 d_comp:14 decimals.a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 d_comp:21 + │ ├── columns: upsert_a:21 upsert_b:22 upsert_c:23 upsert_d:24 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 d_comp:20 │ ├── project - │ │ ├── columns: d_comp:21 a:10 b_default:11 c_default:12 d_comp:14 decimals.a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ ├── columns: d_comp:20 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 │ │ ├── left-join (hash) - │ │ │ ├── columns: a:10 b_default:11 c_default:12 d_comp:14 decimals.a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ ├── columns: a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 │ │ │ ├── ensure-upsert-distinct-on - │ │ │ │ ├── columns: a:10 b_default:11 c_default:12 d_comp:14 - │ │ │ │ ├── grouping columns: a:10 + │ │ │ │ ├── columns: a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null + │ │ │ │ ├── grouping columns: a_cast:8!null │ │ │ │ ├── project - │ │ │ │ │ ├── columns: d_comp:14 a:10 b_default:11 c_default:12 + │ │ │ │ │ ├── columns: d_cast:13!null a_cast:8!null b_default:9 c_cast:11!null │ │ │ │ │ ├── project - │ │ │ │ │ │ ├── columns: d_comp:13 a:10 b_default:11 c_default:12 + │ │ │ │ │ │ ├── columns: d_comp:12!null a_cast:8!null b_default:9 c_cast:11!null │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ ├── columns: a:10 b_default:11 c_default:12 + │ │ │ │ │ │ │ ├── columns: c_cast:11!null a_cast:8!null b_default:9 │ │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ │ ├── columns: b_default:8 c_default:9!null column1:7!null - │ │ │ │ │ │ │ │ ├── values - │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null - │ │ │ │ │ │ │ │ │ └── (1.1,) + │ │ │ │ │ │ │ │ ├── columns: b_default:9 c_default:10!null a_cast:8!null + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: a_cast:8!null + │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null + │ │ │ │ │ │ │ │ │ │ └── (1.1,) + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=a_cast:8] + │ │ │ │ │ │ │ │ │ └── column1:7 │ │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ │ ├── NULL::DECIMAL(5,1)[] [as=b_default:8] - │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:9] + │ │ │ │ │ │ │ │ 
├── NULL::DECIMAL(5,1)[] [as=b_default:9] + │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:10] │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(column1:7, 0) [as=a:10] - │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(b_default:8, 1) [as=b_default:11] - │ │ │ │ │ │ │ └── crdb_internal.round_decimal_values(c_default:9, 1) [as=c_default:12] + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=c_cast:11] + │ │ │ │ │ │ │ └── c_default:10 │ │ │ │ │ │ └── projections - │ │ │ │ │ │ └── a:10::DECIMAL + c_default:12::DECIMAL [as=d_comp:13] + │ │ │ │ │ │ └── a_cast:8::DECIMAL + c_cast:11::DECIMAL [as=d_comp:12] │ │ │ │ │ └── projections - │ │ │ │ │ └── crdb_internal.round_decimal_values(d_comp:13, 1) [as=d_comp:14] + │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=d_cast:13] + │ │ │ │ │ └── d_comp:12 │ │ │ │ └── aggregations - │ │ │ │ ├── first-agg [as=b_default:11] - │ │ │ │ │ └── b_default:11 - │ │ │ │ ├── first-agg [as=c_default:12] - │ │ │ │ │ └── c_default:12 - │ │ │ │ └── first-agg [as=d_comp:14] - │ │ │ │ └── d_comp:14 + │ │ │ │ ├── first-agg [as=b_default:9] + │ │ │ │ │ └── b_default:9 + │ │ │ │ ├── first-agg [as=c_cast:11] + │ │ │ │ │ └── c_cast:11 + │ │ │ │ └── first-agg [as=d_cast:13] + │ │ │ │ └── d_cast:13 │ │ │ ├── scan decimals - │ │ │ │ ├── columns: decimals.a:15!null b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ ├── columns: a:14!null b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 │ │ │ │ └── computed column expressions - │ │ │ │ └── d:18 - │ │ │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL + │ │ │ │ └── d:17 + │ │ │ │ └── a:14::DECIMAL + c:16::DECIMAL │ │ │ └── filters - │ │ │ └── a:10 = decimals.a:15 + │ │ │ └── a_cast:8 = a:14 │ │ └── projections - │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL [as=d_comp:21] + │ │ └── a:14::DECIMAL + c:16::DECIMAL [as=d_comp:20] │ └── projections - │ ├── CASE WHEN decimals.a:15 IS NULL THEN a:10 ELSE decimals.a:15 END [as=upsert_a:22] - │ ├── CASE WHEN decimals.a:15 IS NULL THEN b_default:11 ELSE b:16 END [as=upsert_b:23] - │ ├── CASE WHEN decimals.a:15 IS NULL THEN c_default:12 ELSE c:17 END [as=upsert_c:24] - │ └── CASE WHEN decimals.a:15 IS NULL THEN d_comp:14 ELSE d:18 END [as=upsert_d:25] + │ ├── CASE WHEN a:14 IS NULL THEN a_cast:8 ELSE a:14 END [as=upsert_a:21] + │ ├── CASE WHEN a:14 IS NULL THEN b_default:9 ELSE b:15 END [as=upsert_b:22] + │ ├── CASE WHEN a:14 IS NULL THEN c_cast:11 ELSE c:16 END [as=upsert_c:23] + │ └── CASE WHEN a:14 IS NULL THEN d_cast:13 ELSE d:17 END [as=upsert_d:24] └── projections - ├── round(upsert_a:22) = upsert_a:22 [as=check1:26] - └── upsert_b:23[0] > 1 [as=check2:27] + ├── round(upsert_a:21) = upsert_a:21 [as=check1:25] + └── upsert_b:22[0] > 1 [as=check2:26] + +# Regular UPSERT case as a prepared statement. 
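+# The placeholder value is assignment-cast to the column type (DECIMAL(10) for a),
+# so the resulting plan mirrors the regular UPSERT case above.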
+assign-placeholders-build query-args=(1.1) +UPSERT INTO decimals (a) VALUES ($1) +---- +upsert decimals + ├── columns: + ├── arbiter indexes: decimals_pkey + ├── canary column: a:14 + ├── fetch columns: a:14 b:15 c:16 d:17 + ├── insert-mapping: + │ ├── a_cast:8 => a:1 + │ ├── b_default:9 => b:2 + │ ├── c_cast:11 => c:3 + │ └── d_cast:13 => d:4 + ├── check columns: check1:25 check2:26 + └── project + ├── columns: check1:25 check2:26 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 d_comp:20 upsert_a:21 upsert_b:22 upsert_c:23 upsert_d:24 + ├── project + │ ├── columns: upsert_a:21 upsert_b:22 upsert_c:23 upsert_d:24 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 d_comp:20 + │ ├── project + │ │ ├── columns: d_comp:20 a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ ├── left-join (hash) + │ │ │ ├── columns: a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null a:14 b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ ├── columns: a_cast:8!null b_default:9 c_cast:11!null d_cast:13!null + │ │ │ │ ├── grouping columns: a_cast:8!null + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: d_cast:13!null a_cast:8!null b_default:9 c_cast:11!null + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: d_comp:12!null a_cast:8!null b_default:9 c_cast:11!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: c_cast:11!null a_cast:8!null b_default:9 + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: b_default:9 c_default:10!null a_cast:8!null + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: a_cast:8!null + │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null + │ │ │ │ │ │ │ │ │ │ └── (1.1,) + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=a_cast:8] + │ │ │ │ │ │ │ │ │ └── column1:7 + │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ ├── NULL::DECIMAL(5,1)[] [as=b_default:9] + │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:10] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=c_cast:11] + │ │ │ │ │ │ │ └── c_default:10 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ └── a_cast:8::DECIMAL + c_cast:11::DECIMAL [as=d_comp:12] + │ │ │ │ │ └── projections + │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=d_cast:13] + │ │ │ │ │ └── d_comp:12 + │ │ │ │ └── aggregations + │ │ │ │ ├── first-agg [as=b_default:9] + │ │ │ │ │ └── b_default:9 + │ │ │ │ ├── first-agg [as=c_cast:11] + │ │ │ │ │ └── c_cast:11 + │ │ │ │ └── first-agg [as=d_cast:13] + │ │ │ │ └── d_cast:13 + │ │ │ ├── scan decimals + │ │ │ │ ├── columns: a:14!null b:15 c:16 d:17 crdb_internal_mvcc_timestamp:18 tableoid:19 + │ │ │ │ └── computed column expressions + │ │ │ │ └── d:17 + │ │ │ │ └── a:14::DECIMAL + c:16::DECIMAL + │ │ │ └── filters + │ │ │ └── a_cast:8 = a:14 + │ │ └── projections + │ │ └── a:14::DECIMAL + c:16::DECIMAL [as=d_comp:20] + │ └── projections + │ ├── CASE WHEN a:14 IS NULL THEN a_cast:8 ELSE a:14 END [as=upsert_a:21] + │ ├── CASE WHEN a:14 IS NULL THEN b_default:9 ELSE b:15 END [as=upsert_b:22] + │ ├── CASE WHEN a:14 IS NULL THEN c_cast:11 ELSE c:16 END [as=upsert_c:23] + │ └── CASE WHEN a:14 IS NULL THEN d_cast:13 ELSE d:17 END [as=upsert_d:24] + └── projections + ├── round(upsert_a:21) = upsert_a:21 [as=check1:25] + └── upsert_b:22[0] > 1 
[as=check2:26] # INSERT...ON CONFLICT case. build @@ -1852,81 +1964,1116 @@ DO UPDATE SET b=ARRAY[0.99] upsert decimals ├── columns: ├── arbiter indexes: decimals_pkey - ├── canary column: decimals.a:15 - ├── fetch columns: decimals.a:15 decimals.b:16 c:17 d:18 + ├── canary column: a:15 + ├── fetch columns: a:15 b:16 c:17 d:18 ├── insert-mapping: - │ ├── a:10 => decimals.a:1 - │ ├── b:11 => decimals.b:2 - │ ├── c_default:12 => c:3 - │ └── d_comp:14 => d:4 + │ ├── a_cast:9 => a:1 + │ ├── b_cast:10 => b:2 + │ ├── c_cast:12 => c:3 + │ └── d_cast:14 => d:4 ├── update-mapping: - │ └── upsert_b:25 => decimals.b:2 + │ └── upsert_b:25 => b:2 ├── check columns: check1:28 check2:29 └── project - ├── columns: check1:28 check2:29 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_new:22 d_comp:23 upsert_a:24 upsert_b:25 upsert_c:26 upsert_d:27 + ├── columns: check1:28 check2:29 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null d_comp:23 upsert_a:24 upsert_b:25 upsert_c:26 upsert_d:27 ├── project - │ ├── columns: upsert_a:24 upsert_b:25 upsert_c:26 upsert_d:27 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_new:22 d_comp:23 + │ ├── columns: upsert_a:24 upsert_b:25 upsert_c:26 upsert_d:27 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null d_comp:23 │ ├── project - │ │ ├── columns: d_comp:23 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_new:22 + │ │ ├── columns: d_comp:23 a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null │ │ ├── project - │ │ │ ├── columns: b_new:22 a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ ├── columns: b_cast:22!null a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ ├── project - │ │ │ │ ├── columns: b_new:21!null a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ ├── columns: b_new:21!null a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ │ ├── left-join (hash) - │ │ │ │ │ ├── columns: a:10 b:11 c_default:12 d_comp:14 decimals.a:15 decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ │ │ ├── ensure-upsert-distinct-on - │ │ │ │ │ │ ├── columns: a:10 b:11 c_default:12 d_comp:14 - │ │ │ │ │ │ ├── grouping columns: a:10 + │ │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10 c_cast:12!null d_cast:14!null + │ │ │ │ │ │ ├── grouping columns: a_cast:9!null │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ ├── columns: d_comp:14 a:10 b:11 c_default:12 + │ │ │ │ │ │ │ ├── columns: d_cast:14!null a_cast:9!null b_cast:10 c_cast:12!null │ │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ │ ├── columns: d_comp:13 a:10 b:11 c_default:12 + │ │ │ │ │ │ │ │ ├── columns: d_comp:13!null a_cast:9!null b_cast:10 c_cast:12!null │ │ │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ │ │ ├── columns: a:10 b:11 c_default:12 + │ │ │ │ │ │ │ │ 
│ ├── columns: c_cast:12!null a_cast:9!null b_cast:10 │ │ │ │ │ │ │ │ │ ├── project - │ │ │ │ │ │ │ │ │ │ ├── columns: c_default:9!null column1:7!null column2:8 - │ │ │ │ │ │ │ │ │ │ ├── values - │ │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null column2:8 - │ │ │ │ │ │ │ │ │ │ │ └── (1.1, ARRAY[0.95]) + │ │ │ │ │ │ │ │ │ │ ├── columns: c_default:11!null a_cast:9!null b_cast:10 + │ │ │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10 + │ │ │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null column2:8 + │ │ │ │ │ │ │ │ │ │ │ │ └── (1.1, ARRAY[0.95]) + │ │ │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ │ │ ├── assignment-cast: DECIMAL(10) [as=a_cast:9] + │ │ │ │ │ │ │ │ │ │ │ │ └── column1:7 + │ │ │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(5,1)[] [as=b_cast:10] + │ │ │ │ │ │ │ │ │ │ │ └── column2:8 │ │ │ │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:9] + │ │ │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:11] │ │ │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(column1:7, 0) [as=a:10] - │ │ │ │ │ │ │ │ │ ├── crdb_internal.round_decimal_values(column2:8, 1) [as=b:11] - │ │ │ │ │ │ │ │ │ └── crdb_internal.round_decimal_values(c_default:9, 1) [as=c_default:12] + │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=c_cast:12] + │ │ │ │ │ │ │ │ │ └── c_default:11 │ │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ │ └── a:10::DECIMAL + c_default:12::DECIMAL [as=d_comp:13] + │ │ │ │ │ │ │ │ └── a_cast:9::DECIMAL + c_cast:12::DECIMAL [as=d_comp:13] │ │ │ │ │ │ │ └── projections - │ │ │ │ │ │ │ └── crdb_internal.round_decimal_values(d_comp:13, 1) [as=d_comp:14] + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=d_cast:14] + │ │ │ │ │ │ │ └── d_comp:13 │ │ │ │ │ │ └── aggregations - │ │ │ │ │ │ ├── first-agg [as=b:11] - │ │ │ │ │ │ │ └── b:11 - │ │ │ │ │ │ ├── first-agg [as=c_default:12] - │ │ │ │ │ │ │ └── c_default:12 - │ │ │ │ │ │ └── first-agg [as=d_comp:14] - │ │ │ │ │ │ └── d_comp:14 + │ │ │ │ │ │ ├── first-agg [as=b_cast:10] + │ │ │ │ │ │ │ └── b_cast:10 + │ │ │ │ │ │ ├── first-agg [as=c_cast:12] + │ │ │ │ │ │ │ └── c_cast:12 + │ │ │ │ │ │ └── first-agg [as=d_cast:14] + │ │ │ │ │ │ └── d_cast:14 │ │ │ │ │ ├── scan decimals - │ │ │ │ │ │ ├── columns: decimals.a:15!null decimals.b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ │ │ ├── columns: a:15!null b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 │ │ │ │ │ │ └── computed column expressions │ │ │ │ │ │ └── d:18 - │ │ │ │ │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL + │ │ │ │ │ │ └── a:15::DECIMAL + c:17::DECIMAL │ │ │ │ │ └── filters - │ │ │ │ │ └── a:10 = decimals.a:15 + │ │ │ │ │ └── a_cast:9 = a:15 │ │ │ │ └── projections │ │ │ │ └── ARRAY[0.99] [as=b_new:21] │ │ │ └── projections - │ │ │ └── crdb_internal.round_decimal_values(b_new:21, 1) [as=b_new:22] + │ │ │ └── assignment-cast: DECIMAL(5,1)[] [as=b_cast:22] + │ │ │ └── b_new:21 │ │ └── projections - │ │ └── decimals.a:15::DECIMAL + c:17::DECIMAL [as=d_comp:23] + │ │ └── a:15::DECIMAL + c:17::DECIMAL [as=d_comp:23] │ └── projections - │ ├── CASE WHEN decimals.a:15 IS NULL THEN a:10 ELSE decimals.a:15 END [as=upsert_a:24] - │ ├── CASE WHEN decimals.a:15 IS NULL THEN b:11 ELSE b_new:22 END [as=upsert_b:25] - │ ├── CASE WHEN decimals.a:15 IS NULL THEN c_default:12 ELSE c:17 END [as=upsert_c:26] - │ └── CASE WHEN decimals.a:15 IS NULL THEN d_comp:14 ELSE d:18 END [as=upsert_d:27] + │ ├── CASE WHEN a:15 IS NULL THEN 
a_cast:9 ELSE a:15 END [as=upsert_a:24] + │ ├── CASE WHEN a:15 IS NULL THEN b_cast:10 ELSE b_cast:22 END [as=upsert_b:25] + │ ├── CASE WHEN a:15 IS NULL THEN c_cast:12 ELSE c:17 END [as=upsert_c:26] + │ └── CASE WHEN a:15 IS NULL THEN d_cast:14 ELSE d:18 END [as=upsert_d:27] └── projections ├── round(upsert_a:24) = upsert_a:24 [as=check1:28] └── upsert_b:25[0] > 1 [as=check2:29] +# INSERT...ON CONFLICT case as a prepared statement. +assign-placeholders-build query-args=(1.1, ARRAY[0.95], ARRAY[0.99]) +INSERT INTO decimals (a, b) VALUES ($1, $2) +ON CONFLICT (a) +DO UPDATE SET b=$3 +---- +upsert decimals + ├── columns: + ├── arbiter indexes: decimals_pkey + ├── canary column: a:15 + ├── fetch columns: a:15 b:16 c:17 d:18 + ├── insert-mapping: + │ ├── a_cast:9 => a:1 + │ ├── b_cast:10 => b:2 + │ ├── c_cast:12 => c:3 + │ └── d_cast:14 => d:4 + ├── update-mapping: + │ └── upsert_b:25 => b:2 + ├── check columns: check1:28 check2:29 + └── project + ├── columns: check1:28 check2:29 a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null d_comp:23 upsert_a:24 upsert_b:25!null upsert_c:26 upsert_d:27 + ├── project + │ ├── columns: upsert_a:24 upsert_b:25!null upsert_c:26 upsert_d:27 a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null d_comp:23 + │ ├── project + │ │ ├── columns: d_comp:23 a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 b_cast:22!null + │ │ ├── project + │ │ │ ├── columns: b_cast:22!null a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ ├── project + │ │ │ │ ├── columns: b_new:21!null a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ ├── left-join (hash) + │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null a:15 b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10!null c_cast:12!null d_cast:14!null + │ │ │ │ │ │ ├── grouping columns: a_cast:9!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_cast:14!null a_cast:9!null b_cast:10!null c_cast:12!null + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: d_comp:13!null a_cast:9!null b_cast:10!null c_cast:12!null + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: c_cast:12!null a_cast:9!null b_cast:10!null + │ │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ │ ├── columns: c_default:11!null a_cast:9!null b_cast:10!null + │ │ │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ │ │ ├── columns: a_cast:9!null b_cast:10!null + │ │ │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: column1:7!null column2:8!null + │ │ │ │ │ │ │ │ │ │ │ │ └── (1.1, ARRAY[0.95]) + │ │ │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ │ │ ├── assignment-cast: DECIMAL(10) [as=a_cast:9] + │ │ │ │ │ │ │ │ │ │ │ │ └── column1:7 + │ │ │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(5,1)[] [as=b_cast:10] + │ │ │ │ │ │ │ │ │ │ │ └── column2:8 + │ │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ │ └── 1.23 [as=c_default:11] + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=c_cast:12] + │ │ │ │ │ │ │ │ │ └── c_default:11 + │ │ │ │ │ │ │ │ └── projections 
+ │ │ │ │ │ │ │ │ └── a_cast:9::DECIMAL + c_cast:12::DECIMAL [as=d_comp:13] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10,1) [as=d_cast:14] + │ │ │ │ │ │ │ └── d_comp:13 + │ │ │ │ │ │ └── aggregations + │ │ │ │ │ │ ├── first-agg [as=b_cast:10] + │ │ │ │ │ │ │ └── b_cast:10 + │ │ │ │ │ │ ├── first-agg [as=c_cast:12] + │ │ │ │ │ │ │ └── c_cast:12 + │ │ │ │ │ │ └── first-agg [as=d_cast:14] + │ │ │ │ │ │ └── d_cast:14 + │ │ │ │ │ ├── scan decimals + │ │ │ │ │ │ ├── columns: a:15!null b:16 c:17 d:18 crdb_internal_mvcc_timestamp:19 tableoid:20 + │ │ │ │ │ │ └── computed column expressions + │ │ │ │ │ │ └── d:18 + │ │ │ │ │ │ └── a:15::DECIMAL + c:17::DECIMAL + │ │ │ │ │ └── filters + │ │ │ │ │ └── a_cast:9 = a:15 + │ │ │ │ └── projections + │ │ │ │ └── ARRAY[0.99] [as=b_new:21] + │ │ │ └── projections + │ │ │ └── assignment-cast: DECIMAL(5,1)[] [as=b_cast:22] + │ │ │ └── b_new:21 + │ │ └── projections + │ │ └── a:15::DECIMAL + c:17::DECIMAL [as=d_comp:23] + │ └── projections + │ ├── CASE WHEN a:15 IS NULL THEN a_cast:9 ELSE a:15 END [as=upsert_a:24] + │ ├── CASE WHEN a:15 IS NULL THEN b_cast:10 ELSE b_cast:22 END [as=upsert_b:25] + │ ├── CASE WHEN a:15 IS NULL THEN c_cast:12 ELSE c:17 END [as=upsert_c:26] + │ └── CASE WHEN a:15 IS NULL THEN d_cast:14 ELSE d:18 END [as=upsert_d:27] + └── projections + ├── round(upsert_a:24) = upsert_a:24 [as=check1:28] + └── upsert_b:25[0] > 1 [as=check2:29] + +# Test standard upsert with some types that require assignment casts. +build +UPSERT INTO assn_cast (k, c, qc, i, s) VALUES (1.0::DECIMAL, ' ', 'foo', '1', 2) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:22 + ├── fetch columns: k:22 c:23 qc:24 i:25 s:26 d:27 d_comp:28 + ├── insert-mapping: + │ ├── k_cast:15 => k:1 + │ ├── c_cast:16 => c:2 + │ ├── qc_cast:17 => qc:3 + │ ├── column4:13 => i:4 + │ ├── s_cast:18 => s:5 + │ ├── d_default:19 => d:6 + │ └── d_comp_cast:21 => d_comp:7 + ├── update-mapping: + │ ├── c_cast:16 => c:2 + │ ├── qc_cast:17 => qc:3 + │ ├── column4:13 => i:4 + │ └── s_cast:18 => s:5 + └── project + ├── columns: upsert_k:32 upsert_d:33 upsert_d_comp:34 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 k:22 c:23 qc:24 i:25 s:26 d:27 d_comp:28 crdb_internal_mvcc_timestamp:29 tableoid:30 d_comp_comp:31 + ├── project + │ ├── columns: d_comp_comp:31 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 k:22 c:23 qc:24 i:25 s:26 d:27 d_comp:28 crdb_internal_mvcc_timestamp:29 tableoid:30 + │ ├── left-join (hash) + │ │ ├── columns: column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 k:22 c:23 qc:24 i:25 s:26 d:27 d_comp:28 crdb_internal_mvcc_timestamp:29 tableoid:30 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 + │ │ │ ├── grouping columns: k_cast:15!null + │ │ │ ├── project + │ │ │ │ ├── columns: d_comp_cast:21 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: d_comp_comp:20 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: d_default:19 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: 
k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null column4:13!null + │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null column5:14!null + │ │ │ │ │ │ │ │ └── (1.0, ' ', 'foo', 1, 2) + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ ├── assignment-cast: INT8 [as=k_cast:15] + │ │ │ │ │ │ │ │ └── column1:10 + │ │ │ │ │ │ │ ├── assignment-cast: CHAR [as=c_cast:16] + │ │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ │ ├── assignment-cast: "char" [as=qc_cast:17] + │ │ │ │ │ │ │ │ └── column3:12 + │ │ │ │ │ │ │ └── assignment-cast: STRING [as=s_cast:18] + │ │ │ │ │ │ │ └── column5:14 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:19] + │ │ │ │ │ └── projections + │ │ │ │ │ └── d_default:19::DECIMAL + 10.0 [as=d_comp_comp:20] + │ │ │ │ └── projections + │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:21] + │ │ │ │ └── d_comp_comp:20 + │ │ │ └── aggregations + │ │ │ ├── first-agg [as=c_cast:16] + │ │ │ │ └── c_cast:16 + │ │ │ ├── first-agg [as=qc_cast:17] + │ │ │ │ └── qc_cast:17 + │ │ │ ├── first-agg [as=column4:13] + │ │ │ │ └── column4:13 + │ │ │ ├── first-agg [as=s_cast:18] + │ │ │ │ └── s_cast:18 + │ │ │ ├── first-agg [as=d_default:19] + │ │ │ │ └── d_default:19 + │ │ │ └── first-agg [as=d_comp_cast:21] + │ │ │ └── d_comp_cast:21 + │ │ ├── scan assn_cast + │ │ │ ├── columns: k:22!null c:23 qc:24 i:25 s:26 d:27 d_comp:28 crdb_internal_mvcc_timestamp:29 tableoid:30 + │ │ │ └── computed column expressions + │ │ │ └── d_comp:28 + │ │ │ └── d:27::DECIMAL + 10.0 + │ │ └── filters + │ │ └── k_cast:15 = k:22 + │ └── projections + │ └── d:27::DECIMAL + 10.0 [as=d_comp_comp:31] + └── projections + ├── CASE WHEN k:22 IS NULL THEN k_cast:15 ELSE k:22 END [as=upsert_k:32] + ├── CASE WHEN k:22 IS NULL THEN d_default:19 ELSE d:27 END [as=upsert_d:33] + └── CASE WHEN k:22 IS NULL THEN d_comp_cast:21 ELSE d_comp:28 END [as=upsert_d_comp:34] + +# Test standard prepared upsert with some types that require assignment casts. 
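+# The placeholder arguments are assignment-cast to the target column types
+# (INT8 for k, CHAR for c, "char" for qc); the unspecified s and d columns use their defaults.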
+assign-placeholders-build query-args=(1.0, ' ', 'foo', '1') +UPSERT INTO assn_cast (k, c, qc, i) VALUES ($1::DECIMAL, $2, $3, $4) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:21 + ├── fetch columns: k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 + ├── insert-mapping: + │ ├── k_cast:14 => k:1 + │ ├── c_cast:15 => c:2 + │ ├── qc_cast:16 => qc:3 + │ ├── column4:13 => i:4 + │ ├── s_default:17 => s:5 + │ ├── d_default:18 => d:6 + │ └── d_comp_cast:20 => d_comp:7 + ├── update-mapping: + │ ├── c_cast:15 => c:2 + │ ├── qc_cast:16 => qc:3 + │ └── column4:13 => i:4 + └── project + ├── columns: upsert_k:31 upsert_s:32 upsert_d:33 upsert_d_comp:34 column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 d_comp_comp:30 + ├── project + │ ├── columns: d_comp_comp:30 column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ ├── left-join (hash) + │ │ ├── columns: column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 d_comp_cast:20 + │ │ │ ├── grouping columns: k_cast:14!null + │ │ │ ├── project + │ │ │ │ ├── columns: d_comp_cast:20 column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: d_comp_comp:19 column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null s_default:17 d_default:18 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: s_default:17 d_default:18 column4:13!null k_cast:14!null c_cast:15!null qc_cast:16!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: k_cast:14!null c_cast:15!null qc_cast:16!null column4:13!null + │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null + │ │ │ │ │ │ │ │ └── (1.0, ' ', 'foo', 1) + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ ├── assignment-cast: INT8 [as=k_cast:14] + │ │ │ │ │ │ │ │ └── column1:10 + │ │ │ │ │ │ │ ├── assignment-cast: CHAR [as=c_cast:15] + │ │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ │ └── assignment-cast: "char" [as=qc_cast:16] + │ │ │ │ │ │ │ └── column3:12 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ ├── NULL::STRING [as=s_default:17] + │ │ │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:18] + │ │ │ │ │ └── projections + │ │ │ │ │ └── d_default:18::DECIMAL + 10.0 [as=d_comp_comp:19] + │ │ │ │ └── projections + │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:20] + │ │ │ │ └── d_comp_comp:19 + │ │ │ └── aggregations + │ │ │ ├── first-agg [as=c_cast:15] + │ │ │ │ └── c_cast:15 + │ │ │ ├── first-agg [as=qc_cast:16] + │ │ │ │ └── qc_cast:16 + │ │ │ ├── first-agg [as=column4:13] + │ │ │ │ └── column4:13 + │ │ │ ├── first-agg [as=s_default:17] + │ │ │ │ └── s_default:17 + │ │ │ ├── first-agg [as=d_default:18] + │ │ │ │ └── d_default:18 + │ │ │ └── first-agg [as=d_comp_cast:20] + │ │ │ └── d_comp_cast:20 + │ │ ├── scan assn_cast + │ │ │ ├── columns: k:21!null c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ │ │ └── computed column expressions + │ │ │ └── d_comp:27 + │ │ │ 
└── d:26::DECIMAL + 10.0 + │ │ └── filters + │ │ └── k_cast:14 = k:21 + │ └── projections + │ └── d:26::DECIMAL + 10.0 [as=d_comp_comp:30] + └── projections + ├── CASE WHEN k:21 IS NULL THEN k_cast:14 ELSE k:21 END [as=upsert_k:31] + ├── CASE WHEN k:21 IS NULL THEN s_default:17 ELSE s:25 END [as=upsert_s:32] + ├── CASE WHEN k:21 IS NULL THEN d_default:18 ELSE d:26 END [as=upsert_d:33] + └── CASE WHEN k:21 IS NULL THEN d_comp_cast:20 ELSE d_comp:27 END [as=upsert_d_comp:34] + +# Test standard insert-do-nothing with some types that require assignment casts. +build +INSERT INTO assn_cast (k, c, qc, i, s) VALUES (1.0::DECIMAL, ' ', 'foo', '1', 2) ON CONFLICT DO NOTHING +---- +insert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── insert-mapping: + │ ├── k_cast:15 => k:1 + │ ├── c_cast:16 => c:2 + │ ├── qc_cast:17 => qc:3 + │ ├── column4:13 => i:4 + │ ├── s_cast:18 => s:5 + │ ├── d_default:19 => d:6 + │ └── d_comp_cast:21 => d_comp:7 + └── upsert-distinct-on + ├── columns: column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 + ├── grouping columns: k_cast:15!null + ├── anti-join (hash) + │ ├── columns: column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 d_comp_cast:21 + │ ├── project + │ │ ├── columns: d_comp_cast:21 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 + │ │ ├── project + │ │ │ ├── columns: d_comp_comp:20 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null d_default:19 + │ │ │ ├── project + │ │ │ │ ├── columns: d_default:19 column4:13!null k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: k_cast:15!null c_cast:16!null qc_cast:17!null s_cast:18!null column4:13!null + │ │ │ │ │ ├── values + │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null column5:14!null + │ │ │ │ │ │ └── (1.0, ' ', 'foo', 1, 2) + │ │ │ │ │ └── projections + │ │ │ │ │ ├── assignment-cast: INT8 [as=k_cast:15] + │ │ │ │ │ │ └── column1:10 + │ │ │ │ │ ├── assignment-cast: CHAR [as=c_cast:16] + │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ ├── assignment-cast: "char" [as=qc_cast:17] + │ │ │ │ │ │ └── column3:12 + │ │ │ │ │ └── assignment-cast: STRING [as=s_cast:18] + │ │ │ │ │ └── column5:14 + │ │ │ │ └── projections + │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:19] + │ │ │ └── projections + │ │ │ └── d_default:19::DECIMAL + 10.0 [as=d_comp_comp:20] + │ │ └── projections + │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:21] + │ │ └── d_comp_comp:20 + │ ├── scan assn_cast + │ │ ├── columns: k:22!null c:23 qc:24 i:25 s:26 d:27 d_comp:28 + │ │ └── computed column expressions + │ │ └── d_comp:28 + │ │ └── d:27::DECIMAL + 10.0 + │ └── filters + │ └── k_cast:15 = k:22 + └── aggregations + ├── first-agg [as=c_cast:16] + │ └── c_cast:16 + ├── first-agg [as=qc_cast:17] + │ └── qc_cast:17 + ├── first-agg [as=column4:13] + │ └── column4:13 + ├── first-agg [as=s_cast:18] + │ └── s_cast:18 + ├── first-agg [as=d_default:19] + │ └── d_default:19 + └── first-agg [as=d_comp_cast:21] + └── d_comp_cast:21 + +# Test standard insert-do-update with some types that require assignment casts. 
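+# Both the inserted VALUES and the DO UPDATE SET expressions go through assignment
+# casts (see the c_cast, qc_cast, and s_cast columns built from the SET values).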
+build +INSERT INTO assn_cast (k, c, qc, i, s) VALUES (1.0::DECIMAL, 'a', 'b', 1, 'c') +ON CONFLICT (k) DO UPDATE SET c = ' ', qc = 'foo', i = '1', s = 2 +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:21 + ├── fetch columns: k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 + ├── insert-mapping: + │ ├── k_cast:15 => k:1 + │ ├── c_cast:16 => c:2 + │ ├── qc_cast:17 => qc:3 + │ ├── column4:13 => i:4 + │ ├── column5:14 => s:5 + │ ├── d_default:18 => d:6 + │ └── d_comp_cast:20 => d_comp:7 + ├── update-mapping: + │ ├── upsert_c:39 => c:2 + │ ├── upsert_qc:40 => qc:3 + │ ├── upsert_i:41 => i:4 + │ └── upsert_s:42 => s:5 + └── project + ├── columns: upsert_k:38 upsert_c:39!null upsert_qc:40!null upsert_i:41!null upsert_s:42!null upsert_d:43 upsert_d_comp:44 column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 i_new:32!null c_cast:34!null qc_cast:35!null s_cast:36!null d_comp_comp:37 + ├── project + │ ├── columns: d_comp_comp:37 column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 i_new:32!null c_cast:34!null qc_cast:35!null s_cast:36!null + │ ├── project + │ │ ├── columns: c_cast:34!null qc_cast:35!null s_cast:36!null column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 i_new:32!null + │ │ ├── project + │ │ │ ├── columns: c_new:30!null qc_new:31!null i_new:32!null s_new:33!null column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ │ │ ├── left-join (hash) + │ │ │ │ ├── columns: column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 k:21 c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ ├── columns: column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 d_comp_cast:20 + │ │ │ │ │ ├── grouping columns: k_cast:15!null + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: d_comp_cast:20 column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_comp_comp:19 column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null d_default:18 + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: d_default:18 column4:13!null column5:14!null k_cast:15!null c_cast:16!null qc_cast:17!null + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: k_cast:15!null c_cast:16!null qc_cast:17!null column4:13!null column5:14!null + │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null column5:14!null + │ │ │ │ │ │ │ │ │ │ └── (1.0, 'a', 'b', 1, 'c') + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ ├── assignment-cast: INT8 [as=k_cast:15] + │ │ │ │ │ │ │ │ │ │ └── column1:10 + │ │ │ │ │ │ │ │ │ ├── assignment-cast: CHAR [as=c_cast:16] + │ │ │ │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ │ │ │ └── assignment-cast: "char" [as=qc_cast:17] + │ │ │ │ │ │ │ │ │ └── column3:12 + │ │ │ │ │ │ │ 
│ └── projections + │ │ │ │ │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:18] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── d_default:18::DECIMAL + 10.0 [as=d_comp_comp:19] + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:20] + │ │ │ │ │ │ └── d_comp_comp:19 + │ │ │ │ │ └── aggregations + │ │ │ │ │ ├── first-agg [as=c_cast:16] + │ │ │ │ │ │ └── c_cast:16 + │ │ │ │ │ ├── first-agg [as=qc_cast:17] + │ │ │ │ │ │ └── qc_cast:17 + │ │ │ │ │ ├── first-agg [as=column4:13] + │ │ │ │ │ │ └── column4:13 + │ │ │ │ │ ├── first-agg [as=column5:14] + │ │ │ │ │ │ └── column5:14 + │ │ │ │ │ ├── first-agg [as=d_default:18] + │ │ │ │ │ │ └── d_default:18 + │ │ │ │ │ └── first-agg [as=d_comp_cast:20] + │ │ │ │ │ └── d_comp_cast:20 + │ │ │ │ ├── scan assn_cast + │ │ │ │ │ ├── columns: k:21!null c:22 qc:23 i:24 s:25 d:26 d_comp:27 crdb_internal_mvcc_timestamp:28 tableoid:29 + │ │ │ │ │ └── computed column expressions + │ │ │ │ │ └── d_comp:27 + │ │ │ │ │ └── d:26::DECIMAL + 10.0 + │ │ │ │ └── filters + │ │ │ │ └── k_cast:15 = k:21 + │ │ │ └── projections + │ │ │ ├── ' ' [as=c_new:30] + │ │ │ ├── 'foo' [as=qc_new:31] + │ │ │ ├── 1 [as=i_new:32] + │ │ │ └── 2 [as=s_new:33] + │ │ └── projections + │ │ ├── assignment-cast: CHAR [as=c_cast:34] + │ │ │ └── c_new:30 + │ │ ├── assignment-cast: "char" [as=qc_cast:35] + │ │ │ └── qc_new:31 + │ │ └── assignment-cast: STRING [as=s_cast:36] + │ │ └── s_new:33 + │ └── projections + │ └── d:26::DECIMAL + 10.0 [as=d_comp_comp:37] + └── projections + ├── CASE WHEN k:21 IS NULL THEN k_cast:15 ELSE k:21 END [as=upsert_k:38] + ├── CASE WHEN k:21 IS NULL THEN c_cast:16 ELSE c_cast:34 END [as=upsert_c:39] + ├── CASE WHEN k:21 IS NULL THEN qc_cast:17 ELSE qc_cast:35 END [as=upsert_qc:40] + ├── CASE WHEN k:21 IS NULL THEN column4:13 ELSE i_new:32 END [as=upsert_i:41] + ├── CASE WHEN k:21 IS NULL THEN column5:14 ELSE s_cast:36 END [as=upsert_s:42] + ├── CASE WHEN k:21 IS NULL THEN d_default:18 ELSE d:26 END [as=upsert_d:43] + └── CASE WHEN k:21 IS NULL THEN d_comp_cast:20 ELSE d_comp:27 END [as=upsert_d_comp:44] + +# Test upsert to DEFAULT that requires an assignment cast. 
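+# In the plan below, the DEFAULT expression for i evaluates to 10::INT2, so an
+# assignment cast to INT8 (i_cast) is added before the value is written.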
+build +UPSERT INTO assn_cast (k, i) VALUES (1, DEFAULT) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:19 + ├── fetch columns: k:19 c:20 qc:21 i:22 s:23 d:24 d_comp:25 + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:13 => c:2 + │ ├── qc_default:14 => qc:3 + │ ├── i_cast:12 => i:4 + │ ├── s_default:15 => s:5 + │ ├── d_default:16 => d:6 + │ └── d_comp_cast:18 => d_comp:7 + ├── update-mapping: + │ └── i_cast:12 => i:4 + └── project + ├── columns: upsert_k:29 upsert_c:30 upsert_qc:31 upsert_s:32 upsert_d:33 upsert_d_comp:34 column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 d_comp_cast:18 k:19 c:20 qc:21 i:22 s:23 d:24 d_comp:25 crdb_internal_mvcc_timestamp:26 tableoid:27 d_comp_comp:28 + ├── project + │ ├── columns: d_comp_comp:28 column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 d_comp_cast:18 k:19 c:20 qc:21 i:22 s:23 d:24 d_comp:25 crdb_internal_mvcc_timestamp:26 tableoid:27 + │ ├── left-join (hash) + │ │ ├── columns: column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 d_comp_cast:18 k:19 c:20 qc:21 i:22 s:23 d:24 d_comp:25 crdb_internal_mvcc_timestamp:26 tableoid:27 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 d_comp_cast:18 + │ │ │ ├── grouping columns: column1:10!null + │ │ │ ├── project + │ │ │ │ ├── columns: d_comp_cast:18 column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: d_comp_comp:17 column1:10!null i_cast:12 c_default:13 qc_default:14 s_default:15 d_default:16 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: c_default:13 qc_default:14 s_default:15 d_default:16 column1:10!null i_cast:12 + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: i_cast:12 column1:10!null + │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11 + │ │ │ │ │ │ │ │ └── (1, 10::INT2) + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: INT8 [as=i_cast:12] + │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ ├── NULL::CHAR [as=c_default:13] + │ │ │ │ │ │ ├── NULL::"char" [as=qc_default:14] + │ │ │ │ │ │ ├── NULL::STRING [as=s_default:15] + │ │ │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:16] + │ │ │ │ │ └── projections + │ │ │ │ │ └── d_default:16::DECIMAL + 10.0 [as=d_comp_comp:17] + │ │ │ │ └── projections + │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:18] + │ │ │ │ └── d_comp_comp:17 + │ │ │ └── aggregations + │ │ │ ├── first-agg [as=i_cast:12] + │ │ │ │ └── i_cast:12 + │ │ │ ├── first-agg [as=c_default:13] + │ │ │ │ └── c_default:13 + │ │ │ ├── first-agg [as=qc_default:14] + │ │ │ │ └── qc_default:14 + │ │ │ ├── first-agg [as=s_default:15] + │ │ │ │ └── s_default:15 + │ │ │ ├── first-agg [as=d_default:16] + │ │ │ │ └── d_default:16 + │ │ │ └── first-agg [as=d_comp_cast:18] + │ │ │ └── d_comp_cast:18 + │ │ ├── scan assn_cast + │ │ │ ├── columns: k:19!null c:20 qc:21 i:22 s:23 d:24 d_comp:25 crdb_internal_mvcc_timestamp:26 tableoid:27 + │ │ │ └── computed column expressions + │ │ │ └── d_comp:25 + │ │ │ └── d:24::DECIMAL + 10.0 + │ │ └── filters + │ │ └── column1:10 = k:19 + │ └── projections + │ └── d:24::DECIMAL + 10.0 [as=d_comp_comp:28] + └── projections + ├── CASE WHEN k:19 IS NULL THEN column1:10 ELSE k:19 END [as=upsert_k:29] + ├── CASE WHEN k:19 IS NULL THEN c_default:13 ELSE c:20 END [as=upsert_c:30] + 
├── CASE WHEN k:19 IS NULL THEN qc_default:14 ELSE qc:21 END [as=upsert_qc:31] + ├── CASE WHEN k:19 IS NULL THEN s_default:15 ELSE s:23 END [as=upsert_s:32] + ├── CASE WHEN k:19 IS NULL THEN d_default:16 ELSE d:24 END [as=upsert_d:33] + └── CASE WHEN k:19 IS NULL THEN d_comp_cast:18 ELSE d_comp:25 END [as=upsert_d_comp:34] + +# Test insert-do-update to DEFAULT that requires an assignment cast. +build +INSERT INTO assn_cast (k, i) VALUES (1, 2) ON CONFLICT (k) DO UPDATE SET i = DEFAULT +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:18 + ├── fetch columns: k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:12 => c:2 + │ ├── qc_default:13 => qc:3 + │ ├── column2:11 => i:4 + │ ├── s_default:14 => s:5 + │ ├── d_default:15 => d:6 + │ └── d_comp_cast:17 => d_comp:7 + ├── update-mapping: + │ └── upsert_i:33 => i:4 + └── project + ├── columns: upsert_k:30 upsert_c:31 upsert_qc:32 upsert_i:33!null upsert_s:34 upsert_d:35 upsert_d_comp:36 column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 i_cast:28!null d_comp_comp:29 + ├── project + │ ├── columns: d_comp_comp:29 column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 i_cast:28!null + │ ├── project + │ │ ├── columns: i_cast:28!null column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 + │ │ ├── project + │ │ │ ├── columns: i_new:27!null column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 + │ │ │ ├── left-join (hash) + │ │ │ │ ├── columns: column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 k:18 c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 + │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ ├── columns: column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 d_comp_cast:17 + │ │ │ │ │ ├── grouping columns: column1:10!null + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: d_comp_cast:17 column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_comp_comp:16 column1:10!null column2:11!null c_default:12 qc_default:13 s_default:14 d_default:15 + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: c_default:12 qc_default:13 s_default:14 d_default:15 column1:10!null column2:11!null + │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null + │ │ │ │ │ │ │ │ │ └── (1, 2) + │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ ├── NULL::CHAR [as=c_default:12] + │ │ │ │ │ │ │ │ ├── NULL::"char" [as=qc_default:13] + │ │ │ │ │ │ │ │ ├── NULL::STRING [as=s_default:14] + │ │ │ │ │ │ │ │ └── NULL::DECIMAL(10) [as=d_default:15] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── d_default:15::DECIMAL + 10.0 [as=d_comp_comp:16] + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:17] + │ │ │ │ │ │ └── d_comp_comp:16 + │ │ │ │ │ └── aggregations + │ │ │ │ │ ├── first-agg 
[as=column2:11] + │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ ├── first-agg [as=c_default:12] + │ │ │ │ │ │ └── c_default:12 + │ │ │ │ │ ├── first-agg [as=qc_default:13] + │ │ │ │ │ │ └── qc_default:13 + │ │ │ │ │ ├── first-agg [as=s_default:14] + │ │ │ │ │ │ └── s_default:14 + │ │ │ │ │ ├── first-agg [as=d_default:15] + │ │ │ │ │ │ └── d_default:15 + │ │ │ │ │ └── first-agg [as=d_comp_cast:17] + │ │ │ │ │ └── d_comp_cast:17 + │ │ │ │ ├── scan assn_cast + │ │ │ │ │ ├── columns: k:18!null c:19 qc:20 i:21 s:22 d:23 d_comp:24 crdb_internal_mvcc_timestamp:25 tableoid:26 + │ │ │ │ │ └── computed column expressions + │ │ │ │ │ └── d_comp:24 + │ │ │ │ │ └── d:23::DECIMAL + 10.0 + │ │ │ │ └── filters + │ │ │ │ └── column1:10 = k:18 + │ │ │ └── projections + │ │ │ └── 10::INT2 [as=i_new:27] + │ │ └── projections + │ │ └── assignment-cast: INT8 [as=i_cast:28] + │ │ └── i_new:27 + │ └── projections + │ └── d:23::DECIMAL + 10.0 [as=d_comp_comp:29] + └── projections + ├── CASE WHEN k:18 IS NULL THEN column1:10 ELSE k:18 END [as=upsert_k:30] + ├── CASE WHEN k:18 IS NULL THEN c_default:12 ELSE c:19 END [as=upsert_c:31] + ├── CASE WHEN k:18 IS NULL THEN qc_default:13 ELSE qc:20 END [as=upsert_qc:32] + ├── CASE WHEN k:18 IS NULL THEN column2:11 ELSE i_cast:28 END [as=upsert_i:33] + ├── CASE WHEN k:18 IS NULL THEN s_default:14 ELSE s:22 END [as=upsert_s:34] + ├── CASE WHEN k:18 IS NULL THEN d_default:15 ELSE d:23 END [as=upsert_d:35] + └── CASE WHEN k:18 IS NULL THEN d_comp_cast:17 ELSE d_comp:24 END [as=upsert_d_comp:36] + +# Test upsert to a column that requires an assignment cast and a computed column +# that depends on the new value. +build +UPSERT INTO assn_cast (k, d) VALUES (1, 1.45::DECIMAL(10, 2)) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:20 + ├── fetch columns: k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:13 => c:2 + │ ├── qc_default:14 => qc:3 + │ ├── i_cast:17 => i:4 + │ ├── s_default:16 => s:5 + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + ├── update-mapping: + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + └── project + ├── columns: upsert_k:29 upsert_c:30 upsert_qc:31 upsert_i:32 upsert_s:33 column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19 k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + ├── left-join (hash) + │ ├── columns: column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19 k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ ├── ensure-upsert-distinct-on + │ │ ├── columns: column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19 + │ │ ├── grouping columns: column1:10!null + │ │ ├── project + │ │ │ ├── columns: d_comp_cast:19 column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ ├── project + │ │ │ │ ├── columns: d_comp_comp:18 column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: i_cast:17!null column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: c_default:13 qc_default:14 i_default:15!null s_default:16 column1:10!null d_cast:12 + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_cast:12 column1:10!null + │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ ├── columns: 
column1:10!null column2:11 + │ │ │ │ │ │ │ │ └── (1, 1.45::DECIMAL(10,2)) + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_cast:12] + │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ ├── NULL::CHAR [as=c_default:13] + │ │ │ │ │ │ ├── NULL::"char" [as=qc_default:14] + │ │ │ │ │ │ ├── 10::INT2 [as=i_default:15] + │ │ │ │ │ │ └── NULL::STRING [as=s_default:16] + │ │ │ │ │ └── projections + │ │ │ │ │ └── assignment-cast: INT8 [as=i_cast:17] + │ │ │ │ │ └── i_default:15 + │ │ │ │ └── projections + │ │ │ │ └── d_cast:12::DECIMAL + 10.0 [as=d_comp_comp:18] + │ │ │ └── projections + │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:19] + │ │ │ └── d_comp_comp:18 + │ │ └── aggregations + │ │ ├── first-agg [as=d_cast:12] + │ │ │ └── d_cast:12 + │ │ ├── first-agg [as=c_default:13] + │ │ │ └── c_default:13 + │ │ ├── first-agg [as=qc_default:14] + │ │ │ └── qc_default:14 + │ │ ├── first-agg [as=i_cast:17] + │ │ │ └── i_cast:17 + │ │ ├── first-agg [as=s_default:16] + │ │ │ └── s_default:16 + │ │ └── first-agg [as=d_comp_cast:19] + │ │ └── d_comp_cast:19 + │ ├── scan assn_cast + │ │ ├── columns: k:20!null c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ └── computed column expressions + │ │ └── d_comp:26 + │ │ └── d:25::DECIMAL + 10.0 + │ └── filters + │ └── column1:10 = k:20 + └── projections + ├── CASE WHEN k:20 IS NULL THEN column1:10 ELSE k:20 END [as=upsert_k:29] + ├── CASE WHEN k:20 IS NULL THEN c_default:13 ELSE c:21 END [as=upsert_c:30] + ├── CASE WHEN k:20 IS NULL THEN qc_default:14 ELSE qc:22 END [as=upsert_qc:31] + ├── CASE WHEN k:20 IS NULL THEN i_cast:17 ELSE i:23 END [as=upsert_i:32] + └── CASE WHEN k:20 IS NULL THEN s_default:16 ELSE s:24 END [as=upsert_s:33] + +# Test prepared upsert to a column that requires an assignment cast and a +# computed column that depends on the new value. 
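+# In the plan below, the placeholder value is assignment-cast to DECIMAL(10)
+# (d_cast), and the computed column expression is re-evaluated on top of the
+# cast value before its own assignment cast (d_comp_cast).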
+assign-placeholders-build query-args=(1.45::DECIMAL(10, 2)) +UPSERT INTO assn_cast (k, d) VALUES (1, $1) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:20 + ├── fetch columns: k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:13 => c:2 + │ ├── qc_default:14 => qc:3 + │ ├── i_cast:17 => i:4 + │ ├── s_default:16 => s:5 + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + ├── update-mapping: + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + └── project + ├── columns: upsert_k:29 upsert_c:30 upsert_qc:31 upsert_i:32 upsert_s:33 column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + ├── left-join (hash) + │ ├── columns: column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ ├── ensure-upsert-distinct-on + │ │ ├── columns: column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null + │ │ ├── grouping columns: column1:10!null + │ │ ├── project + │ │ │ ├── columns: d_comp_cast:19!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ ├── project + │ │ │ │ ├── columns: d_comp_comp:18!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: i_cast:17!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: c_default:13 qc_default:14 i_default:15!null s_default:16 column1:10!null d_cast:12!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_cast:12!null column1:10!null + │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null + │ │ │ │ │ │ │ │ └── (1, 1.45) + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_cast:12] + │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ ├── NULL::CHAR [as=c_default:13] + │ │ │ │ │ │ ├── NULL::"char" [as=qc_default:14] + │ │ │ │ │ │ ├── 10::INT2 [as=i_default:15] + │ │ │ │ │ │ └── NULL::STRING [as=s_default:16] + │ │ │ │ │ └── projections + │ │ │ │ │ └── assignment-cast: INT8 [as=i_cast:17] + │ │ │ │ │ └── i_default:15 + │ │ │ │ └── projections + │ │ │ │ └── d_cast:12::DECIMAL + 10.0 [as=d_comp_comp:18] + │ │ │ └── projections + │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:19] + │ │ │ └── d_comp_comp:18 + │ │ └── aggregations + │ │ ├── first-agg [as=d_cast:12] + │ │ │ └── d_cast:12 + │ │ ├── first-agg [as=c_default:13] + │ │ │ └── c_default:13 + │ │ ├── first-agg [as=qc_default:14] + │ │ │ └── qc_default:14 + │ │ ├── first-agg [as=i_cast:17] + │ │ │ └── i_cast:17 + │ │ ├── first-agg [as=s_default:16] + │ │ │ └── s_default:16 + │ │ └── first-agg [as=d_comp_cast:19] + │ │ └── d_comp_cast:19 + │ ├── scan assn_cast + │ │ ├── columns: k:20!null c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ └── computed column expressions + │ │ └── d_comp:26 + │ │ └── d:25::DECIMAL + 10.0 + │ └── filters + │ └── column1:10 = k:20 + └── projections + ├── CASE WHEN k:20 IS NULL THEN column1:10 ELSE k:20 END [as=upsert_k:29] + ├── CASE WHEN k:20 IS NULL THEN c_default:13 ELSE c:21 END [as=upsert_c:30] + ├── 
CASE WHEN k:20 IS NULL THEN qc_default:14 ELSE qc:22 END [as=upsert_qc:31] + ├── CASE WHEN k:20 IS NULL THEN i_cast:17 ELSE i:23 END [as=upsert_i:32] + └── CASE WHEN k:20 IS NULL THEN s_default:16 ELSE s:24 END [as=upsert_s:33] + +# Test insert-do-nothing to a column that requires an assignment cast and a +# computed column that depends on the new value. +build +INSERT INTO assn_cast (k, d) VALUES (1, 1.45::DECIMAL(10, 2)) ON CONFLICT DO NOTHING +---- +insert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:13 => c:2 + │ ├── qc_default:14 => qc:3 + │ ├── i_cast:17 => i:4 + │ ├── s_default:16 => s:5 + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + └── upsert-distinct-on + ├── columns: column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19 + ├── grouping columns: column1:10!null + ├── anti-join (hash) + │ ├── columns: column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19 + │ ├── project + │ │ ├── columns: d_comp_cast:19 column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ ├── project + │ │ │ ├── columns: d_comp_comp:18 column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ ├── project + │ │ │ │ ├── columns: i_cast:17!null column1:10!null d_cast:12 c_default:13 qc_default:14 s_default:16 + │ │ │ │ ├── project + │ │ │ │ │ ├── columns: c_default:13 qc_default:14 i_default:15!null s_default:16 column1:10!null d_cast:12 + │ │ │ │ │ ├── project + │ │ │ │ │ │ ├── columns: d_cast:12 column1:10!null + │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11 + │ │ │ │ │ │ │ └── (1, 1.45::DECIMAL(10,2)) + │ │ │ │ │ │ └── projections + │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_cast:12] + │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ └── projections + │ │ │ │ │ ├── NULL::CHAR [as=c_default:13] + │ │ │ │ │ ├── NULL::"char" [as=qc_default:14] + │ │ │ │ │ ├── 10::INT2 [as=i_default:15] + │ │ │ │ │ └── NULL::STRING [as=s_default:16] + │ │ │ │ └── projections + │ │ │ │ └── assignment-cast: INT8 [as=i_cast:17] + │ │ │ │ └── i_default:15 + │ │ │ └── projections + │ │ │ └── d_cast:12::DECIMAL + 10.0 [as=d_comp_comp:18] + │ │ └── projections + │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:19] + │ │ └── d_comp_comp:18 + │ ├── scan assn_cast + │ │ ├── columns: k:20!null c:21 qc:22 i:23 s:24 d:25 d_comp:26 + │ │ └── computed column expressions + │ │ └── d_comp:26 + │ │ └── d:25::DECIMAL + 10.0 + │ └── filters + │ └── column1:10 = k:20 + └── aggregations + ├── first-agg [as=d_cast:12] + │ └── d_cast:12 + ├── first-agg [as=c_default:13] + │ └── c_default:13 + ├── first-agg [as=qc_default:14] + │ └── qc_default:14 + ├── first-agg [as=i_cast:17] + │ └── i_cast:17 + ├── first-agg [as=s_default:16] + │ └── s_default:16 + └── first-agg [as=d_comp_cast:19] + └── d_comp_cast:19 + +# Test insert-do-update to a column that requires an assignment cast and a +# computed column that depends on the new value. 
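+# In the plan below, both the insert-side value and the SET value receive
+# assignment casts to DECIMAL(10) (d_cast:12 and d_cast:30), and the computed
+# column d_comp is re-derived from each side before the final upsert_d and
+# upsert_d_comp CASE projections.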
+build +INSERT INTO assn_cast (k, d) VALUES (1, 1.45) ON CONFLICT (k) DO UPDATE SET d = 2.67::DECIMAL(10, 2) +---- +upsert assn_cast + ├── columns: + ├── arbiter indexes: assn_cast_pkey + ├── canary column: k:20 + ├── fetch columns: k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 + ├── insert-mapping: + │ ├── column1:10 => k:1 + │ ├── c_default:13 => c:2 + │ ├── qc_default:14 => qc:3 + │ ├── i_cast:17 => i:4 + │ ├── s_default:16 => s:5 + │ ├── d_cast:12 => d:6 + │ └── d_comp_cast:19 => d_comp:7 + ├── update-mapping: + │ ├── upsert_d:38 => d:6 + │ └── upsert_d_comp:39 => d_comp:7 + └── project + ├── columns: upsert_k:33 upsert_c:34 upsert_qc:35 upsert_i:36 upsert_s:37 upsert_d:38!null upsert_d_comp:39!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 d_cast:30!null d_comp_cast:32!null + ├── project + │ ├── columns: d_comp_cast:32!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 d_cast:30!null + │ ├── project + │ │ ├── columns: d_comp_comp:31!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 d_cast:30!null + │ │ ├── project + │ │ │ ├── columns: d_cast:30!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ │ ├── project + │ │ │ │ ├── columns: d_new:29!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ │ │ ├── left-join (hash) + │ │ │ │ │ ├── columns: column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null k:20 c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ │ ├── columns: column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null d_comp_cast:19!null + │ │ │ │ │ │ ├── grouping columns: column1:10!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_comp_cast:19!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: d_comp_comp:18!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 i_cast:17!null + │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ ├── columns: i_cast:17!null column1:10!null d_cast:12!null c_default:13 qc_default:14 s_default:16 + │ │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ │ ├── columns: c_default:13 qc_default:14 i_default:15!null s_default:16 column1:10!null d_cast:12!null + │ │ │ │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ │ │ │ ├── columns: d_cast:12!null column1:10!null + │ │ │ │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: column1:10!null column2:11!null + │ │ │ │ │ │ │ │ │ │ │ │ └── (1, 1.45) + │ │ │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_cast:12] + │ │ │ │ │ │ │ │ │ │ │ └── column2:11 + │ │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ │ ├── NULL::CHAR [as=c_default:13] + │ │ │ │ │ 
│ │ │ │ │ ├── NULL::"char" [as=qc_default:14] + │ │ │ │ │ │ │ │ │ │ ├── 10::INT2 [as=i_default:15] + │ │ │ │ │ │ │ │ │ │ └── NULL::STRING [as=s_default:16] + │ │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ │ └── assignment-cast: INT8 [as=i_cast:17] + │ │ │ │ │ │ │ │ │ └── i_default:15 + │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ └── d_cast:12::DECIMAL + 10.0 [as=d_comp_comp:18] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:19] + │ │ │ │ │ │ │ └── d_comp_comp:18 + │ │ │ │ │ │ └── aggregations + │ │ │ │ │ │ ├── first-agg [as=d_cast:12] + │ │ │ │ │ │ │ └── d_cast:12 + │ │ │ │ │ │ ├── first-agg [as=c_default:13] + │ │ │ │ │ │ │ └── c_default:13 + │ │ │ │ │ │ ├── first-agg [as=qc_default:14] + │ │ │ │ │ │ │ └── qc_default:14 + │ │ │ │ │ │ ├── first-agg [as=i_cast:17] + │ │ │ │ │ │ │ └── i_cast:17 + │ │ │ │ │ │ ├── first-agg [as=s_default:16] + │ │ │ │ │ │ │ └── s_default:16 + │ │ │ │ │ │ └── first-agg [as=d_comp_cast:19] + │ │ │ │ │ │ └── d_comp_cast:19 + │ │ │ │ │ ├── scan assn_cast + │ │ │ │ │ │ ├── columns: k:20!null c:21 qc:22 i:23 s:24 d:25 d_comp:26 crdb_internal_mvcc_timestamp:27 tableoid:28 + │ │ │ │ │ │ └── computed column expressions + │ │ │ │ │ │ └── d_comp:26 + │ │ │ │ │ │ └── d:25::DECIMAL + 10.0 + │ │ │ │ │ └── filters + │ │ │ │ │ └── column1:10 = k:20 + │ │ │ │ └── projections + │ │ │ │ └── 2.67::DECIMAL(10,2) [as=d_new:29] + │ │ │ └── projections + │ │ │ └── assignment-cast: DECIMAL(10) [as=d_cast:30] + │ │ │ └── d_new:29 + │ │ └── projections + │ │ └── d_cast:30::DECIMAL + 10.0 [as=d_comp_comp:31] + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:32] + │ └── d_comp_comp:31 + └── projections + ├── CASE WHEN k:20 IS NULL THEN column1:10 ELSE k:20 END [as=upsert_k:33] + ├── CASE WHEN k:20 IS NULL THEN c_default:13 ELSE c:21 END [as=upsert_c:34] + ├── CASE WHEN k:20 IS NULL THEN qc_default:14 ELSE qc:22 END [as=upsert_qc:35] + ├── CASE WHEN k:20 IS NULL THEN i_cast:17 ELSE i:23 END [as=upsert_i:36] + ├── CASE WHEN k:20 IS NULL THEN s_default:16 ELSE s:24 END [as=upsert_s:37] + ├── CASE WHEN k:20 IS NULL THEN d_cast:12 ELSE d_cast:30 END [as=upsert_d:38] + └── CASE WHEN k:20 IS NULL THEN d_comp_cast:19 ELSE d_comp_cast:32 END [as=upsert_d_comp:39] + +# Test ON UPDATE columns that require assignment casts. 
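+# In the plan below, the ON UPDATE expressions for d and d2 (1.23 and
+# 1.23::DECIMAL(10,2)) are assignment-cast to the column type DECIMAL(10,1),
+# and the computed column d_comp is recomputed from the cast value of d.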
+build +INSERT INTO assn_cast_on_update (k, i) VALUES (1, 2) ON CONFLICT (k) DO UPDATE SET i = 3 +---- +upsert assn_cast_on_update + ├── columns: + ├── arbiter indexes: assn_cast_on_update_pkey + ├── canary column: k:12 + ├── fetch columns: k:12 i:13 d:14 d2:15 d_comp:16 + ├── insert-mapping: + │ ├── column1:8 => k:1 + │ ├── column2:9 => i:2 + │ ├── d_default:10 => d:3 + │ ├── d_default:10 => d2:4 + │ └── d_comp_cast:11 => d_comp:5 + ├── update-mapping: + │ ├── upsert_i:26 => i:2 + │ ├── upsert_d:27 => d:3 + │ ├── upsert_d2:28 => d2:4 + │ └── upsert_d_comp:29 => d_comp:5 + └── project + ├── columns: upsert_k:25 upsert_i:26!null upsert_d:27 upsert_d2:28 upsert_d_comp:29 column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 i_new:19!null d_cast:22!null d2_cast:23!null d_comp_cast:24!null + ├── project + │ ├── columns: d_comp_cast:24!null column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 i_new:19!null d_cast:22!null d2_cast:23!null + │ ├── project + │ │ ├── columns: d_cast:22!null d2_cast:23!null column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 i_new:19!null + │ │ ├── project + │ │ │ ├── columns: d_on_update:20!null d2_on_update:21!null column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 i_new:19!null + │ │ │ ├── project + │ │ │ │ ├── columns: i_new:19!null column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 + │ │ │ │ ├── left-join (hash) + │ │ │ │ │ ├── columns: column1:8!null column2:9!null d_default:10 d_comp_cast:11 k:12 i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 + │ │ │ │ │ ├── ensure-upsert-distinct-on + │ │ │ │ │ │ ├── columns: column1:8!null column2:9!null d_default:10 d_comp_cast:11 + │ │ │ │ │ │ ├── grouping columns: column1:8!null + │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ ├── columns: d_comp_cast:11 column1:8!null column2:9!null d_default:10 + │ │ │ │ │ │ │ ├── project + │ │ │ │ │ │ │ │ ├── columns: d_default:10 column1:8!null column2:9!null + │ │ │ │ │ │ │ │ ├── values + │ │ │ │ │ │ │ │ │ ├── columns: column1:8!null column2:9!null + │ │ │ │ │ │ │ │ │ └── (1, 2) + │ │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ │ └── NULL::DECIMAL(10,1) [as=d_default:10] + │ │ │ │ │ │ │ └── projections + │ │ │ │ │ │ │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:11] + │ │ │ │ │ │ │ └── d_default:10 + │ │ │ │ │ │ └── aggregations + │ │ │ │ │ │ ├── first-agg [as=column2:9] + │ │ │ │ │ │ │ └── column2:9 + │ │ │ │ │ │ ├── first-agg [as=d_default:10] + │ │ │ │ │ │ │ └── d_default:10 + │ │ │ │ │ │ └── first-agg [as=d_comp_cast:11] + │ │ │ │ │ │ └── d_comp_cast:11 + │ │ │ │ │ ├── scan assn_cast_on_update + │ │ │ │ │ │ ├── columns: k:12!null i:13 d:14 d2:15 d_comp:16 crdb_internal_mvcc_timestamp:17 tableoid:18 + │ │ │ │ │ │ └── computed column expressions + │ │ │ │ │ │ └── d_comp:16 + │ │ │ │ │ │ └── d:14 + │ │ │ │ │ └── filters + │ │ │ │ │ └── column1:8 = k:12 + │ │ │ │ └── projections + │ │ │ │ └── 3 [as=i_new:19] + │ │ │ └── projections + │ │ │ ├── 1.23 [as=d_on_update:20] + │ │ │ └── 1.23::DECIMAL(10,2) [as=d2_on_update:21] + │ │ └── projections + │ │ ├── assignment-cast: DECIMAL(10,1) [as=d_cast:22] + │ │ │ └── d_on_update:20 + │ │ └── assignment-cast: DECIMAL(10,1) 
[as=d2_cast:23] + │ │ └── d2_on_update:21 + │ └── projections + │ └── assignment-cast: DECIMAL(10) [as=d_comp_cast:24] + │ └── d_cast:22 + └── projections + ├── CASE WHEN k:12 IS NULL THEN column1:8 ELSE k:12 END [as=upsert_k:25] + ├── CASE WHEN k:12 IS NULL THEN column2:9 ELSE i_new:19 END [as=upsert_i:26] + ├── CASE WHEN k:12 IS NULL THEN d_default:10 ELSE d_cast:22 END [as=upsert_d:27] + ├── CASE WHEN k:12 IS NULL THEN d_default:10 ELSE d2_cast:23 END [as=upsert_d2:28] + └── CASE WHEN k:12 IS NULL THEN d_comp_cast:11 ELSE d_comp_cast:24 END [as=upsert_d_comp:29] + + # Regression test for #67100. Do not prune check columns for UPSERTs even if the # expression does not reference any mutating columns. @@ -2320,3 +3467,159 @@ upsert generated_as_identity └── projections ├── CASE WHEN rowid:14 IS NULL THEN b_default:9 ELSE b:12 END [as=upsert_b:17] └── CASE WHEN rowid:14 IS NULL THEN rowid_default:10 ELSE rowid:14 END [as=upsert_rowid:18] + +# ------------------------------------------------------------------------------ +# Test ON CONSTRAINT arbiter index selection. +# ------------------------------------------------------------------------------ +build +INSERT INTO xyz (x, y) +VALUES (1, 2) +ON CONFLICT ON CONSTRAINT xyz_pkey DO +UPDATE SET x=5 +---- +upsert xyz + ├── columns: + ├── arbiter indexes: xyz_pkey + ├── canary column: x:9 + ├── fetch columns: x:9 y:10 z:11 + ├── insert-mapping: + │ ├── column1:6 => x:1 + │ ├── column2:7 => y:2 + │ └── z_default:8 => z:3 + ├── update-mapping: + │ └── upsert_x:15 => x:1 + └── project + ├── columns: upsert_x:15!null upsert_y:16 upsert_z:17 column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 x_new:14!null + ├── project + │ ├── columns: x_new:14!null column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ ├── left-join (hash) + │ │ ├── columns: column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column1:6!null column2:7!null z_default:8 + │ │ │ ├── grouping columns: column1:6!null + │ │ │ ├── project + │ │ │ │ ├── columns: z_default:8 column1:6!null column2:7!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:6!null column2:7!null + │ │ │ │ │ └── (1, 2) + │ │ │ │ └── projections + │ │ │ │ └── NULL::INT8 [as=z_default:8] + │ │ │ └── aggregations + │ │ │ ├── first-agg [as=column2:7] + │ │ │ │ └── column2:7 + │ │ │ └── first-agg [as=z_default:8] + │ │ │ └── z_default:8 + │ │ ├── scan xyz + │ │ │ └── columns: x:9!null y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ └── filters + │ │ └── column1:6 = x:9 + │ └── projections + │ └── 5 [as=x_new:14] + └── projections + ├── CASE WHEN x:9 IS NULL THEN column1:6 ELSE x_new:14 END [as=upsert_x:15] + ├── CASE WHEN x:9 IS NULL THEN column2:7 ELSE y:10 END [as=upsert_y:16] + └── CASE WHEN x:9 IS NULL THEN z_default:8 ELSE z:11 END [as=upsert_z:17] + +build +INSERT INTO xyz (x, y) +VALUES (1, 2) +ON CONFLICT ON CONSTRAINT xyz_y_z_key DO +UPDATE SET x=5 +---- +upsert xyz + ├── columns: + ├── arbiter indexes: xyz_y_z_key + ├── canary column: x:9 + ├── fetch columns: x:9 y:10 z:11 + ├── insert-mapping: + │ ├── column1:6 => x:1 + │ ├── column2:7 => y:2 + │ └── z_default:8 => z:3 + ├── update-mapping: + │ └── upsert_x:15 => x:1 + └── project + ├── columns: upsert_x:15!null upsert_y:16 upsert_z:17 column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 
x_new:14!null + ├── project + │ ├── columns: x_new:14!null column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ ├── left-join (hash) + │ │ ├── columns: column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column1:6!null column2:7!null z_default:8 + │ │ │ ├── grouping columns: column2:7!null z_default:8 + │ │ │ ├── project + │ │ │ │ ├── columns: z_default:8 column1:6!null column2:7!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:6!null column2:7!null + │ │ │ │ │ └── (1, 2) + │ │ │ │ └── projections + │ │ │ │ └── NULL::INT8 [as=z_default:8] + │ │ │ └── aggregations + │ │ │ └── first-agg [as=column1:6] + │ │ │ └── column1:6 + │ │ ├── scan xyz + │ │ │ └── columns: x:9!null y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ └── filters + │ │ ├── column2:7 = y:10 + │ │ └── z_default:8 = z:11 + │ └── projections + │ └── 5 [as=x_new:14] + └── projections + ├── CASE WHEN x:9 IS NULL THEN column1:6 ELSE x_new:14 END [as=upsert_x:15] + ├── CASE WHEN x:9 IS NULL THEN column2:7 ELSE y:10 END [as=upsert_y:16] + └── CASE WHEN x:9 IS NULL THEN z_default:8 ELSE z:11 END [as=upsert_z:17] + +build +INSERT INTO xyz (x, y) +VALUES (1, 2) +ON CONFLICT ON CONSTRAINT xyz_z_y_key DO +UPDATE SET x=5 +---- +upsert xyz + ├── columns: + ├── arbiter indexes: xyz_z_y_key + ├── canary column: x:9 + ├── fetch columns: x:9 y:10 z:11 + ├── insert-mapping: + │ ├── column1:6 => x:1 + │ ├── column2:7 => y:2 + │ └── z_default:8 => z:3 + ├── update-mapping: + │ └── upsert_x:15 => x:1 + └── project + ├── columns: upsert_x:15!null upsert_y:16 upsert_z:17 column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 x_new:14!null + ├── project + │ ├── columns: x_new:14!null column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ ├── left-join (hash) + │ │ ├── columns: column1:6!null column2:7!null z_default:8 x:9 y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ ├── ensure-upsert-distinct-on + │ │ │ ├── columns: column1:6!null column2:7!null z_default:8 + │ │ │ ├── grouping columns: column2:7!null z_default:8 + │ │ │ ├── project + │ │ │ │ ├── columns: z_default:8 column1:6!null column2:7!null + │ │ │ │ ├── values + │ │ │ │ │ ├── columns: column1:6!null column2:7!null + │ │ │ │ │ └── (1, 2) + │ │ │ │ └── projections + │ │ │ │ └── NULL::INT8 [as=z_default:8] + │ │ │ └── aggregations + │ │ │ └── first-agg [as=column1:6] + │ │ │ └── column1:6 + │ │ ├── scan xyz + │ │ │ └── columns: x:9!null y:10 z:11 crdb_internal_mvcc_timestamp:12 tableoid:13 + │ │ └── filters + │ │ ├── column2:7 = y:10 + │ │ └── z_default:8 = z:11 + │ └── projections + │ └── 5 [as=x_new:14] + └── projections + ├── CASE WHEN x:9 IS NULL THEN column1:6 ELSE x_new:14 END [as=upsert_x:15] + ├── CASE WHEN x:9 IS NULL THEN column2:7 ELSE y:10 END [as=upsert_y:16] + └── CASE WHEN x:9 IS NULL THEN z_default:8 ELSE z:11 END [as=upsert_z:17] + +build +INSERT INTO xyz (x, y) +VALUES (1, 2) +ON CONFLICT ON CONSTRAINT no_such_constraint DO +UPDATE SET x=5 +---- +error (42704): constraint "no_such_constraint" for table "xyz" does not exist diff --git a/pkg/sql/opt/optbuilder/update.go b/pkg/sql/opt/optbuilder/update.go index 66927222f36c..5386fa8f2cb8 100644 --- a/pkg/sql/opt/optbuilder/update.go +++ b/pkg/sql/opt/optbuilder/update.go @@ -104,7 +104,7 @@ func (b *Builder) buildUpdate(upd *tree.Update, inScope 
*scope) (outScope *scope mb.addTargetColsForUpdate(upd.Exprs) // Build each of the SET expressions. - mb.addUpdateCols(upd.Exprs, false /* isUpsert */) + mb.addUpdateCols(upd.Exprs) // Build the final update statement, including any returned expressions. if resultsNeeded(upd.Returning) { @@ -180,7 +180,7 @@ func (mb *mutationBuilder) addTargetColsForUpdate(exprs tree.UpdateExprs) { // Multiple subqueries result in multiple left joins successively wrapping the // input. A final Project operator is built if any single-column or tuple SET // expressions are present. -func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs, isUpsert bool) { +func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs) { // SET expressions should reject aggregates, generators, etc. scalarProps := &mb.b.semaCtx.Properties defer scalarProps.Restore(*scalarProps) @@ -221,12 +221,6 @@ func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs, isUpsert bool) scopeCol := projectionsScope.addColumn(colName, texpr) mb.b.buildScalar(texpr, inScope, projectionsScope, scopeCol, nil) - if isUpsert { - // Type check the input expression against the corresponding table - // column. - checkDatumTypeFitsColumnType(targetCol, scopeCol.typ) - } - // Add the column ID to the list of columns to update. mb.updateColIDs[ord] = scopeCol.id } @@ -246,11 +240,6 @@ func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs, isUpsert bool) ord := mb.tabID.ColumnOrdinal(mb.targetColList[n]) targetCol := mb.tab.Column(ord) subqueryScope.cols[i].name = scopeColName(targetCol.ColName()) - if isUpsert { - // Type check the input expression against the corresponding table - // column. - checkDatumTypeFitsColumnType(targetCol, subqueryScope.cols[i].typ) - } // Add the column ID to the list of columns to update. mb.updateColIDs[ord] = subqueryScope.cols[i].id @@ -291,20 +280,18 @@ func (mb *mutationBuilder) addUpdateCols(exprs tree.UpdateExprs, isUpsert bool) mb.b.constructProjectForScope(mb.outScope, projectionsScope) mb.outScope = projectionsScope - if !isUpsert { - // Add assignment casts for update columns. - mb.addAssignmentCasts(mb.updateColIDs) - } + // Add assignment casts for update columns. + mb.addAssignmentCasts(mb.updateColIDs) // Add additional columns for computed expressions that may depend on the // updated columns. - mb.addSynthesizedColsForUpdate(isUpsert) + mb.addSynthesizedColsForUpdate() } // addSynthesizedColsForUpdate wraps an Update input expression with a Project // operator containing any computed columns that need to be updated. This // includes write-only mutation columns that are computed. -func (mb *mutationBuilder) addSynthesizedColsForUpdate(isUpsert bool) { +func (mb *mutationBuilder) addSynthesizedColsForUpdate() { // Allow mutation columns to be referenced by other computed mutation // columns (otherwise the scope will raise an error if a mutation column // is referenced). These do not need to be set back to true again because @@ -323,15 +310,8 @@ func (mb *mutationBuilder) addSynthesizedColsForUpdate(isUpsert bool) { true, /* applyOnUpdate */ ) - if isUpsert { - // Possibly round DECIMAL-related columns containing update values. Do - // this before evaluating computed expressions, since those may depend on - // the inserted columns. - mb.roundDecimalValues(mb.updateColIDs, false /* roundComputedCols */) - } else { - // Add assignment casts for default column values. - mb.addAssignmentCasts(mb.updateColIDs) - } + // Add assignment casts for default column values. 
+ mb.addAssignmentCasts(mb.updateColIDs) // Disambiguate names so that references in the computed expression refer to // the correct columns. @@ -340,13 +320,8 @@ func (mb *mutationBuilder) addSynthesizedColsForUpdate(isUpsert bool) { // Add all computed columns in case their values have changed. mb.addSynthesizedComputedCols(mb.updateColIDs, true /* restrict */) - if isUpsert { - // Possibly round DECIMAL-related computed columns. - mb.roundDecimalValues(mb.updateColIDs, true /* roundComputedCols */) - } else { - // Add assignment casts for computed column values. - mb.addAssignmentCasts(mb.updateColIDs) - } + // Add assignment casts for computed column values. + mb.addAssignmentCasts(mb.updateColIDs) } // buildUpdate constructs an Update operator, possibly wrapped by a Project diff --git a/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go b/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go index 28280e741c8c..188e506d5ff3 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go +++ b/pkg/sql/opt/optgen/cmd/optgen/exprs_gen.go @@ -371,7 +371,7 @@ func (g *exprsGen) genExprFuncs(define *lang.DefineExpr) { // Generate the ProvidedPhysical method. fmt.Fprintf(g.w, "func (e *%s) ProvidedPhysical() *physical.Provided {\n", opTyp.name) - fmt.Fprintf(g.w, " return &e.grp.bestProps().provided\n") + fmt.Fprintf(g.w, " return e.grp.bestProps().provided\n") fmt.Fprintf(g.w, "}\n\n") // Generate the Cost method. @@ -479,7 +479,7 @@ func (g *exprsGen) genEnforcerFuncs(define *lang.DefineExpr) { // Generate the ProvidedPhysical method. fmt.Fprintf(g.w, "func (e *%s) ProvidedPhysical() *physical.Provided {\n", opTyp.name) - fmt.Fprintf(g.w, " return &e.best.provided\n") + fmt.Fprintf(g.w, " return e.best.provided\n") fmt.Fprintf(g.w, "}\n\n") // Generate the Cost method. diff --git a/pkg/sql/opt/optgen/cmd/optgen/testdata/exprs b/pkg/sql/opt/optgen/cmd/optgen/testdata/exprs index 05ce8ec487e5..2724c814476a 100644 --- a/pkg/sql/opt/optgen/cmd/optgen/testdata/exprs +++ b/pkg/sql/opt/optgen/cmd/optgen/testdata/exprs @@ -138,7 +138,7 @@ func (e *ProjectExpr) RequiredPhysical() *physical.Required { } func (e *ProjectExpr) ProvidedPhysical() *physical.Provided { - return &e.grp.bestProps().provided + return e.grp.bestProps().provided } func (e *ProjectExpr) Cost() Cost { @@ -352,7 +352,7 @@ func (e *SortExpr) RequiredPhysical() *physical.Required { } func (e *SortExpr) ProvidedPhysical() *physical.Provided { - return &e.best.provided + return e.best.provided } func (e *SortExpr) Cost() Cost { diff --git a/pkg/sql/opt/optgen/exprgen/expr_gen.go b/pkg/sql/opt/optgen/exprgen/expr_gen.go index 61f4094e4fc0..2e68c122b9cd 100644 --- a/pkg/sql/opt/optgen/exprgen/expr_gen.go +++ b/pkg/sql/opt/optgen/exprgen/expr_gen.go @@ -356,7 +356,7 @@ func convertSlice( func (eg *exprGen) populateBestProps(expr opt.Expr, required *physical.Required) memo.Cost { rel, _ := expr.(memo.RelExpr) if rel != nil { - if !xform.CanProvidePhysicalProps(rel, required) { + if !xform.CanProvidePhysicalProps(eg.f.EvalContext(), rel, required) { panic(errorf("operator %s cannot provide required props %s", rel.Op(), required)) } } diff --git a/pkg/sql/opt/ordering/BUILD.bazel b/pkg/sql/opt/ordering/BUILD.bazel index d469a43e00fb..4a28a8658c50 100644 --- a/pkg/sql/opt/ordering/BUILD.bazel +++ b/pkg/sql/opt/ordering/BUILD.bazel @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "ordering", srcs = [ + "distribute.go", "doc.go", "group_by.go", "interesting_orderings.go", diff --git a/pkg/sql/opt/ordering/distribute.go 
b/pkg/sql/opt/ordering/distribute.go new file mode 100644 index 000000000000..ae016e76e0bf --- /dev/null +++ b/pkg/sql/opt/ordering/distribute.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package ordering + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" + "github.com/cockroachdb/cockroach/pkg/sql/opt/props" +) + +func distributeCanProvideOrdering(expr memo.RelExpr, required *props.OrderingChoice) bool { + // Distribute operator can always pass through ordering to its input. + return true +} + +func distributeBuildChildReqOrdering( + parent memo.RelExpr, required *props.OrderingChoice, childIdx int, +) props.OrderingChoice { + // We can pass through any required ordering to the input. + return *required +} + +func distributeBuildProvided(expr memo.RelExpr, required *props.OrderingChoice) opt.Ordering { + d := expr.(*memo.DistributeExpr) + return d.Input.ProvidedPhysical().Ordering +} diff --git a/pkg/sql/opt/ordering/lookup_join.go b/pkg/sql/opt/ordering/lookup_join.go index 104f65c00618..00a05b8b0c25 100644 --- a/pkg/sql/opt/ordering/lookup_join.go +++ b/pkg/sql/opt/ordering/lookup_join.go @@ -64,7 +64,16 @@ func lookupOrIndexJoinBuildChildReqOrdering( // // This case indicates that we didn't do a good job pushing down equalities // (see #36219), but it should be handled correctly here nevertheless. - return trimColumnGroups(&res, &child.Relational().FuncDeps) + res = trimColumnGroups(&res, &child.Relational().FuncDeps) + + // The propagation of FDs might not be perfect; in that case we need to + // simplify the required ordering, or risk passing through unnecessary columns + // in provided orderings. 
+ if fds := &child.Relational().FuncDeps; res.CanSimplify(fds) { + res = res.Copy() + res.Simplify(fds) + } + return res } func indexJoinBuildProvided(expr memo.RelExpr, required *props.OrderingChoice) opt.Ordering { diff --git a/pkg/sql/opt/ordering/ordering.go b/pkg/sql/opt/ordering/ordering.go index 8296db950624..6f4826bbcc6c 100644 --- a/pkg/sql/opt/ordering/ordering.go +++ b/pkg/sql/opt/ordering/ordering.go @@ -216,6 +216,11 @@ func init() { buildChildReqOrdering: sortBuildChildReqOrdering, buildProvidedOrdering: sortBuildProvided, } + funcMap[opt.DistributeOp] = funcs{ + canProvideOrdering: distributeCanProvideOrdering, + buildChildReqOrdering: distributeBuildChildReqOrdering, + buildProvidedOrdering: distributeBuildProvided, + } funcMap[opt.InsertOp] = funcs{ canProvideOrdering: mutationCanProvideOrdering, buildChildReqOrdering: mutationBuildChildReqOrdering, diff --git a/pkg/sql/opt/props/BUILD.bazel b/pkg/sql/opt/props/BUILD.bazel index 3b83606a1552..303fb59b16e5 100644 --- a/pkg/sql/opt/props/BUILD.bazel +++ b/pkg/sql/opt/props/BUILD.bazel @@ -22,7 +22,7 @@ go_library( "//pkg/sql/opt", "//pkg/sql/opt/cat", "//pkg/sql/opt/constraint", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util/buildutil", diff --git a/pkg/sql/opt/props/histogram.go b/pkg/sql/opt/props/histogram.go index 7d5ceb86ab76..c6b0b26b8d3a 100644 --- a/pkg/sql/opt/props/histogram.go +++ b/pkg/sql/opt/props/histogram.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -824,7 +824,7 @@ func getRangesBeforeAndAfter( for i := range boundArr { var err error // Encode each bound value into a sortable byte format. 
- boundArrByte[i], err = rowenc.EncodeTableKey(nil, boundArr[i], encoding.Ascending) + boundArrByte[i], err = keyside.Encode(nil, boundArr[i], encoding.Ascending) if err != nil { return 0, 0, false } diff --git a/pkg/sql/opt/props/physical/BUILD.bazel b/pkg/sql/opt/props/physical/BUILD.bazel index 517e6452d5b6..24330efaa39f 100644 --- a/pkg/sql/opt/props/physical/BUILD.bazel +++ b/pkg/sql/opt/props/physical/BUILD.bazel @@ -3,24 +3,37 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "physical", srcs = [ + "distribution.go", "provided.go", "required.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical", visibility = ["//visibility:public"], deps = [ + "//pkg/roachpb:with-mocks", "//pkg/sql/opt", + "//pkg/sql/opt/cat", + "//pkg/sql/opt/constraint", "//pkg/sql/opt/props", + "//pkg/sql/sem/tree", ], ) go_test( name = "physical_test", size = "small", - srcs = ["required_test.go"], + srcs = [ + "distribution_test.go", + "required_test.go", + ], + embed = [":physical"], deps = [ - ":physical", + "//pkg/config/zonepb", + "//pkg/roachpb:with-mocks", "//pkg/sql/opt", "//pkg/sql/opt/props", + "//pkg/util/leaktest", + "//pkg/util/log", + "@in_gopkg_yaml_v2//:yaml_v2", ], ) diff --git a/pkg/sql/opt/props/physical/distribution.go b/pkg/sql/opt/props/physical/distribution.go new file mode 100644 index 000000000000..4f446f6fea9d --- /dev/null +++ b/pkg/sql/opt/props/physical/distribution.go @@ -0,0 +1,240 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package physical + +import ( + "bytes" + "sort" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" +) + +// Distribution represents the physical distribution of data for a relational +// operator. It is used to describe where the data will be physically located +// during execution, to enable more accurate costing of each operator by taking +// latency and network bandwidth into account. +type Distribution struct { + // Regions is the set of regions that make up this Distribution. They should + // be sorted in lexicographical order. + // TODO(rytaft): Consider abstracting this to a list of "neighborhoods" to + // support more different types of localities. + // TODO(rytaft): Consider mapping the region strings to integers and storing + // this as a FastIntSet. + Regions []string +} + +// Any is true if this Distribution allows any set of regions. +func (d Distribution) Any() bool { + return len(d.Regions) == 0 +} + +func (d Distribution) String() string { + var buf bytes.Buffer + d.format(&buf) + return buf.String() +} + +func (d Distribution) format(buf *bytes.Buffer) { + for i, r := range d.Regions { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(r) + } +} + +// Equals returns true if the two Distributions are identical. 
+func (d Distribution) Equals(rhs Distribution) bool { + if len(d.Regions) != len(rhs.Regions) { + return false + } + + for i := range d.Regions { + if d.Regions[i] != rhs.Regions[i] { + return false + } + } + return true +} + +// Union unions the other distribution with the given distribution, +// removing duplicates. It assumes both distributions are sorted. +func (d Distribution) Union(rhs Distribution) Distribution { + regions := make([]string, 0, len(d.Regions)+len(rhs.Regions)) + l, r := 0, 0 + for l < len(d.Regions) && r < len(rhs.Regions) { + if d.Regions[l] == rhs.Regions[r] { + regions = append(regions, d.Regions[l]) + l++ + r++ + } else if d.Regions[l] < rhs.Regions[r] { + regions = append(regions, d.Regions[l]) + l++ + } else { + regions = append(regions, rhs.Regions[r]) + r++ + } + } + if l < len(d.Regions) { + regions = append(regions, d.Regions[l:]...) + } else if r < len(rhs.Regions) { + regions = append(regions, rhs.Regions[r:]...) + } + return Distribution{Regions: regions} +} + +const regionKey = "region" + +// FromLocality sets the Distribution with the region from the given locality +// (if any). +func (d *Distribution) FromLocality(locality roachpb.Locality) { + if region, ok := locality.Find(regionKey); ok { + d.Regions = []string{region} + } +} + +// FromIndexScan sets the Distribution that results from scanning the given +// index with the given constraint c (c can be nil). +func (d *Distribution) FromIndexScan( + evalCtx *tree.EvalContext, index cat.Index, c *constraint.Constraint, +) { + if index.Table().IsVirtualTable() { + // Virtual tables do not have zone configurations. + return + } + + var regions map[string]struct{} + for i, n := 0, index.PartitionCount(); i < n; i++ { + part := index.Partition(i) + + // If the index scan is constrained, see if we can prune this partition. + if c != nil { + prefixes := part.PartitionByListPrefixes() + var found bool + for _, datums := range prefixes { + if len(datums) == 0 { + // This indicates a DEFAULT value, so we can't easily prune this partition. + found = true + break + } + key := constraint.MakeCompositeKey(datums...) + var span constraint.Span + span.Init(key, constraint.IncludeBoundary, key, constraint.IncludeBoundary) + if c.IntersectsSpan(evalCtx, &span) { + found = true + break + } + } + if !found { + // This partition does not intersect the constraint, so skip it. + continue + } + } + + // Add the regions from this partition to the distribution. + zoneRegions := getRegionsFromZone(part.Zone()) + if len(regions) == 0 { + regions = zoneRegions + } else { + for r := range zoneRegions { + regions[r] = struct{}{} + } + } + } + + if len(regions) == 0 { + regions = getRegionsFromZone(index.Zone()) + } + + // Convert to a slice and sort regions. + d.Regions = make([]string, 0, len(regions)) + for r := range regions { + d.Regions = append(d.Regions, r) + } + sort.Strings(d.Regions) +} + +// getRegionsFromZone returns the regions of the given zone config, if any. It +// attempts to find the smallest set of regions likely to hold the leaseholder. +func getRegionsFromZone(zone cat.Zone) map[string]struct{} { + // First find any regional replica constraints. If there is exactly one, we + // can return early. 
+ var regions map[string]struct{} + for i, n := 0, zone.ReplicaConstraintsCount(); i < n; i++ { + replicaConstraint := zone.ReplicaConstraints(i) + for j, m := 0, replicaConstraint.ConstraintCount(); j < m; j++ { + constraint := replicaConstraint.Constraint(j) + if region, ok := getRegionFromConstraint(constraint); ok { + if regions == nil { + regions = make(map[string]struct{}) + } + regions[region] = struct{}{} + } + } + } + if len(regions) == 1 { + return regions + } + + // Next check the voter replica constraints. Once again, if there is exactly + // one regional constraint, we can return early. + var voterRegions map[string]struct{} + for i, n := 0, zone.VoterConstraintsCount(); i < n; i++ { + replicaConstraint := zone.VoterConstraint(i) + for j, m := 0, replicaConstraint.ConstraintCount(); j < m; j++ { + constraint := replicaConstraint.Constraint(j) + if region, ok := getRegionFromConstraint(constraint); ok { + if voterRegions == nil { + voterRegions = make(map[string]struct{}) + } + voterRegions[region] = struct{}{} + } + } + } + if len(voterRegions) == 1 { + return voterRegions + } + + // Use the lease preferences as a tie breaker. We only really care about the + // first one, since subsequent lease preferences only apply in edge cases. + if zone.LeasePreferenceCount() > 0 { + leasePref := zone.LeasePreference(0) + for i, n := 0, leasePref.ConstraintCount(); i < n; i++ { + constraint := leasePref.Constraint(i) + if region, ok := getRegionFromConstraint(constraint); ok { + return map[string]struct{}{region: {}} + } + } + } + + if len(voterRegions) > 0 { + return voterRegions + } + return regions +} + +// getRegionFromConstraint returns the region and ok=true if the given +// constraint is a required region constraint. Otherwise, returns ok=false. +func getRegionFromConstraint(constraint cat.Constraint) (region string, ok bool) { + if constraint.GetKey() != regionKey { + // We only care about constraints on the region. + return "", false /* ok */ + } + if constraint.IsRequired() { + // The region is required. + return constraint.GetValue(), true /* ok */ + } + // The region is prohibited. + return "", false /* ok */ +} diff --git a/pkg/sql/opt/props/physical/distribution_test.go b/pkg/sql/opt/props/physical/distribution_test.go new file mode 100644 index 000000000000..b9e02744dabb --- /dev/null +++ b/pkg/sql/opt/props/physical/distribution_test.go @@ -0,0 +1,201 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
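The merge in `Distribution.Union` above assumes both region lists are already sorted, which is what lets it dedupe in a single pass. A minimal standalone sketch of that behavior (not taken from the patch; `unionSorted` and the sample regions are illustrative only):

```go
package main

import "fmt"

// unionSorted mirrors the single-pass merge used by Distribution.Union:
// both inputs must be sorted, and the result is their sorted,
// de-duplicated union.
func unionSorted(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		default:
			out = append(out, b[j])
			j++
		}
	}
	out = append(out, a[i:]...) // at most one of these tails is non-empty
	out = append(out, b[j:]...)
	return out
}

func main() {
	// Mirrors one of the cases exercised by TestUnion below.
	fmt.Println(unionSorted([]string{"central", "east", "west"}, []string{"central", "west"}))
	// Output: [central east west]
}
```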
+ +package physical + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "gopkg.in/yaml.v2" +) + +func TestUnion(t *testing.T) { + testCases := []struct { + leftDist []string + rightDist []string + expected []string + }{ + { + leftDist: []string{}, + rightDist: []string{}, + expected: []string{}, + }, + { + leftDist: []string{}, + rightDist: []string{"west"}, + expected: []string{"west"}, + }, + { + leftDist: []string{"east"}, + rightDist: []string{"east"}, + expected: []string{"east"}, + }, + { + leftDist: []string{"west"}, + rightDist: []string{"east"}, + expected: []string{"east", "west"}, + }, + { + leftDist: []string{"central", "east", "west"}, + rightDist: []string{"central", "west"}, + expected: []string{"central", "east", "west"}, + }, + } + for tcIdx, tc := range testCases { + t.Run(fmt.Sprintf("case%d", tcIdx+1), func(t *testing.T) { + leftDist := Distribution{Regions: tc.leftDist} + rightDist := Distribution{Regions: tc.rightDist} + expected := Distribution{Regions: tc.expected} + + res := leftDist.Union(rightDist) + if !res.Equals(expected) { + t.Errorf("expected '%s', got '%s'", expected, res) + } + }) + } +} + +func TestFromLocality(t *testing.T) { + testCases := []struct { + locality roachpb.Locality + expected []string + }{ + { + locality: roachpb.Locality{}, + expected: []string{}, + }, + { + locality: roachpb.Locality{Tiers: []roachpb.Tier{ + {Key: "region", Value: "west"}, + {Key: "zone", Value: "b"}, + }}, + expected: []string{"west"}, + }, + { + locality: roachpb.Locality{Tiers: []roachpb.Tier{ + {Key: "zone", Value: "b"}, + }}, + expected: []string{}, + }, + } + for tcIdx, tc := range testCases { + t.Run(fmt.Sprintf("case%d", tcIdx+1), func(t *testing.T) { + expected := Distribution{Regions: tc.expected} + + var res Distribution + res.FromLocality(tc.locality) + if !res.Equals(expected) { + t.Errorf("expected '%s', got '%s'", expected, res) + } + }) + } +} + +func TestGetRegionsFromZone(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + testCases := []struct { + constraints string + voterConstraints string + leasePrefs string + expected []string + }{ + {constraints: "[]", expected: []string{}}, + {constraints: "[+region=eu,+dc=uk]", expected: []string{"eu"}}, + {constraints: "[-region=us,+dc=east]", expected: []string{}}, + {constraints: "[-region=eu]", expected: []string{}}, + {constraints: "[+region=us]", expected: []string{"us"}}, + {constraints: "[+region=us,-region=eu]", expected: []string{"us"}}, + + {voterConstraints: "[+region=us,-dc=east]", expected: []string{"us"}}, + {voterConstraints: "[+region=us,+dc=west]", expected: []string{"us"}}, + {voterConstraints: "[+dc=east]", expected: []string{}}, + {voterConstraints: "[+dc=west,+ssd]", expected: []string{}}, + {voterConstraints: "[-region=eu,+dc=east]", expected: []string{}}, + {voterConstraints: "[+region=us,+dc=east,+rack=1,-ssd]", expected: []string{"us"}}, + + {constraints: `{"+region=us,+dc=east":3,"-dc=east":2}`, expected: []string{"us"}}, + {constraints: `{"+region=us,+dc=east":3,"+region=us,+dc=west":2}`, expected: []string{"us"}}, + {constraints: `{"+region=us,+dc=east":3,"+region=eu":2}`, expected: []string{"eu", "us"}}, + + {leasePrefs: "[[]]", expected: []string{}}, + {leasePrefs: "[[+dc=west]]", expected: []string{}}, + {leasePrefs: "[[+region=us]]", 
expected: []string{"us"}}, + {leasePrefs: "[[+region=us,+dc=east]]", expected: []string{"us"}}, + + {constraints: "[+region=eu]", voterConstraints: "[+region=eu]", + leasePrefs: "[[+dc=west]]", expected: []string{"eu"}}, + {constraints: "[+region=eu]", voterConstraints: "[+region=eu]", + leasePrefs: "[[+region=us]]", expected: []string{"eu"}}, + {constraints: "[+region=us]", voterConstraints: "[+region=us]", + leasePrefs: "[[+dc=west]]", expected: []string{"us"}}, + {constraints: "[+region=us]", voterConstraints: "[+region=us]", + leasePrefs: "[[+region=us]]", expected: []string{"us"}}, + {constraints: "[+dc=east]", voterConstraints: "[+region=us]", + leasePrefs: "[[+region=us]]", expected: []string{"us"}}, + {constraints: "[+dc=east]", voterConstraints: "[+dc=east]", + leasePrefs: "[[+region=us]]", expected: []string{"us"}}, + {constraints: "[+dc=east]", voterConstraints: "[+dc=east]", + leasePrefs: "[[+dc=east]]", expected: []string{}}, + {constraints: "[+region=us,+dc=east]", voterConstraints: "[-region=eu]", + leasePrefs: "[[+region=us,+dc=east]]", expected: []string{"us"}}, + {constraints: `{"+region=us":3,"+region=eu":2}`, + voterConstraints: `[+region=us]`, expected: []string{"us"}}, + {constraints: `{"+region=us":3,"+region=eu":2}`, + voterConstraints: `{"+region=us":1,"+region=eu":1}`, expected: []string{"eu", "us"}}, + {constraints: `{"+region=us":3,"+region=eu":2}`, + voterConstraints: `{"+region=us":1,"+region=eu":1}`, leasePrefs: "[[+region=us]]", expected: []string{"us"}}, + } + + for _, tc := range testCases { + zone := &zonepb.ZoneConfig{} + + if tc.constraints != "" { + constraintsList := &zonepb.ConstraintsList{} + if err := yaml.UnmarshalStrict([]byte(tc.constraints), constraintsList); err != nil { + t.Fatal(err) + } + zone.Constraints = constraintsList.Constraints + } + + if tc.voterConstraints != "" { + constraintsList := &zonepb.ConstraintsList{} + if err := yaml.UnmarshalStrict([]byte(tc.voterConstraints), constraintsList); err != nil { + t.Fatal(err) + } + zone.VoterConstraints = constraintsList.Constraints + } + + if tc.leasePrefs != "" { + if err := yaml.UnmarshalStrict([]byte(tc.leasePrefs), &zone.LeasePreferences); err != nil { + t.Fatal(err) + } + } + + regions := getRegionsFromZone(zone) + actual := make([]string, 0, len(regions)) + for r := range regions { + actual = append(actual, r) + } + sort.Strings(actual) + if !reflect.DeepEqual(actual, tc.expected) { + t.Errorf("constraints=%v, voterConstraints=%v, leasePrefs=%v: expected %v, got %v", + tc.constraints, tc.voterConstraints, tc.leasePrefs, tc.expected, actual) + } + } +} diff --git a/pkg/sql/opt/props/physical/provided.go b/pkg/sql/opt/props/physical/provided.go index 33022d287c24..fb8436be3ef6 100644 --- a/pkg/sql/opt/props/physical/provided.go +++ b/pkg/sql/opt/props/physical/provided.go @@ -36,14 +36,22 @@ type Provided struct { // See the documentation for the opt/ordering package for some examples. Ordering opt.Ordering - // Note: we store a Provided structure in-place within each group because the - // struct is very small (see memo.bestProps). If we add more fields here, that - // decision needs to be revisited. + // Distribution is a distribution that needs to be maintained on the rows + // produced by this operator in order to satisfy its required distribution. If + // there is a required distribution, the provided distribution must match it + // exactly. 
+ // + // The provided distribution is not yet used when building the DistSQL plan, + // but eventually it should inform the decision about whether to plan + // processors locally or remotely. Currently, it is used to determine whether + // a Distribute operator is needed between this operator and its parent, which + // can affect the cost of a plan. + Distribution Distribution } // Equals returns true if the two sets of provided properties are identical. func (p *Provided) Equals(other *Provided) bool { - return p.Ordering.Equals(other.Ordering) + return p.Ordering.Equals(other.Ordering) && p.Distribution.Equals(other.Distribution) } func (p *Provided) String() string { @@ -52,6 +60,19 @@ func (p *Provided) String() string { if len(p.Ordering) > 0 { buf.WriteString("[ordering: ") p.Ordering.Format(&buf) + if p.Distribution.Any() { + buf.WriteByte(']') + } else { + buf.WriteString(", ") + } + } + + if !p.Distribution.Any() { + if len(p.Ordering) == 0 { + buf.WriteByte('[') + } + buf.WriteString("distribution: ") + p.Distribution.format(&buf) buf.WriteByte(']') } diff --git a/pkg/sql/opt/props/physical/required.go b/pkg/sql/opt/props/physical/required.go index 87447c091bde..6611b8de8fd8 100644 --- a/pkg/sql/opt/props/physical/required.go +++ b/pkg/sql/opt/props/physical/required.go @@ -53,6 +53,14 @@ type Required struct { // float64 representation, and can be converted to an integer number of rows // using math.Ceil. LimitHint float64 + + // Distribution specifies the physical distribution of result rows. This is + // defined as the set of regions that may contain result rows. If + // Distribution is not defined, then no particular distribution is required. + // Currently, the only operator in a plan tree that has a required + // distribution is the root, since data must always be returned to the gateway + // region. + Distribution Distribution } // MinRequired are the default physical properties that require nothing and @@ -62,7 +70,7 @@ var MinRequired = &Required{} // Defined is true if any physical property is defined. If none is defined, then // this is an instance of MinRequired. func (p *Required) Defined() bool { - return !p.Presentation.Any() || !p.Ordering.Any() || p.LimitHint != 0 + return !p.Presentation.Any() || !p.Ordering.Any() || p.LimitHint != 0 || !p.Distribution.Any() } // ColSet returns the set of columns used by any of the physical properties. @@ -96,6 +104,9 @@ func (p *Required) String() string { if p.LimitHint != 0 { output("limit hint", func(buf *bytes.Buffer) { fmt.Fprintf(buf, "%.2f", p.LimitHint) }) } + if !p.Distribution.Any() { + output("distribution", p.Distribution.format) + } // Handle empty properties case. if buf.Len() == 0 { @@ -106,7 +117,8 @@ func (p *Required) String() string { // Equals returns true if the two physical properties are identical. 
func (p *Required) Equals(rhs *Required) bool { - return p.Presentation.Equals(rhs.Presentation) && p.Ordering.Equals(&rhs.Ordering) && p.LimitHint == rhs.LimitHint + return p.Presentation.Equals(rhs.Presentation) && p.Ordering.Equals(&rhs.Ordering) && + p.LimitHint == rhs.LimitHint && p.Distribution.Equals(rhs.Distribution) } // Presentation specifies the naming, membership (including duplicates), and diff --git a/pkg/sql/opt/xform/BUILD.bazel b/pkg/sql/opt/xform/BUILD.bazel index 36bb828274be..e46751d061a1 100644 --- a/pkg/sql/opt/xform/BUILD.bazel +++ b/pkg/sql/opt/xform/BUILD.bazel @@ -30,6 +30,7 @@ go_library( "//pkg/sql/opt", "//pkg/sql/opt/cat", "//pkg/sql/opt/constraint", + "//pkg/sql/opt/distribution", "//pkg/sql/opt/idxconstraint", "//pkg/sql/opt/invertedexpr", "//pkg/sql/opt/invertedidx", diff --git a/pkg/sql/opt/xform/coster.go b/pkg/sql/opt/xform/coster.go index dc6a4922c547..f6e6027f1661 100644 --- a/pkg/sql/opt/xform/coster.go +++ b/pkg/sql/opt/xform/coster.go @@ -466,6 +466,9 @@ func (c *coster) ComputeCost(candidate memo.RelExpr, required *physical.Required case opt.SortOp: cost = c.computeSortCost(candidate.(*memo.SortExpr), required) + case opt.DistributeOp: + cost = c.computeDistributeCost(candidate.(*memo.DistributeExpr), required) + case opt.ScanOp: cost = c.computeScanCost(candidate.(*memo.ScanExpr), required) @@ -642,6 +645,14 @@ func (c *coster) computeSortCost(sort *memo.SortExpr, required *physical.Require return cost } +func (c *coster) computeDistributeCost( + distribute *memo.DistributeExpr, required *physical.Required, +) memo.Cost { + // TODO(rytaft): Compute a real cost here. Currently we just add a tiny cost + // as a placeholder. + return cpuCostFactor +} + func (c *coster) computeScanCost(scan *memo.ScanExpr, required *physical.Required) memo.Cost { if scan.Flags.ForceIndex && scan.Flags.Index != scan.Index || scan.Flags.ForceZigzag { // If we are forcing an index, any other index has a very high cost. In diff --git a/pkg/sql/opt/xform/optimizer.go b/pkg/sql/opt/xform/optimizer.go index 74f4f9c8f50d..74b7e54d1e7b 100644 --- a/pkg/sql/opt/xform/optimizer.go +++ b/pkg/sql/opt/xform/optimizer.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" + "github.com/cockroachdb/cockroach/pkg/sql/opt/distribution" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/norm" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" @@ -499,7 +500,7 @@ func (o *Optimizer) optimizeGroupMember( // properties? That case is taken care of by enforceProps, which will // recursively optimize the group with property subsets and then add // enforcers to provide the remainder. - if CanProvidePhysicalProps(member, required) { + if CanProvidePhysicalProps(o.evalCtx, member, required) { var cost memo.Cost for i, n := 0, member.ChildCount(); i < n; i++ { // Given required parent properties, get the properties required from @@ -579,6 +580,12 @@ func (o *Optimizer) enforceProps( // stripped by recursively optimizing the group with successively fewer // properties. The properties are stripped off in a heuristic order, from // least likely to be expensive to enforce to most likely. 
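In the hunk that follows, the Distribute enforcer is tried before the Sort enforcer, and `shouldExplore` is tightened so exploration only fires once both the ordering and the distribution requirements have been stripped. A simplified sketch of that peel-one-property-then-recurse shape (the `required` and `enforce` names below are stand-ins for illustration, not optimizer APIs):

```go
package main

import "fmt"

// required is a toy stand-in for physical.Required, reduced to the two
// properties that enforcers handle in this change.
type required struct {
	distribution string // "" means any distribution is acceptable
	ordering     string // "" means any ordering is acceptable
}

// enforce peels required properties off one at a time, listing the
// enforcers top-down; the input is then re-optimized under the weaker
// requirement.
func enforce(r required) []string {
	var enforcers []string
	if r.distribution != "" {
		enforcers = append(enforcers, "Distribute")
		r.distribution = "" // child is optimized without the distribution requirement
	}
	if r.ordering != "" {
		enforcers = append(enforcers, "Sort")
		r.ordering = "" // Sort requires no ordering from its input
	}
	return enforcers
}

func main() {
	fmt.Println(enforce(required{distribution: "us", ordering: "+a"}))
	// Output: [Distribute Sort] -- Distribute sits above the Sort enforcer.
}
```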
+ if !required.Distribution.Any() { + enforcer := &memo.DistributeExpr{Input: member} + memberProps := BuildChildPhysicalProps(o.mem, enforcer, 0, required) + return o.optimizeEnforcer(state, enforcer, required, member, memberProps) + } + if !required.Ordering.Any() { // Try Sort enforcer that requires no ordering from its input. enforcer := &memo.SortExpr{Input: member} @@ -633,7 +640,7 @@ func (o *Optimizer) optimizeEnforcer( // shouldExplore ensures that exploration is only triggered for optimizeGroup // calls that will not recurse via a call from enforceProps. func (o *Optimizer) shouldExplore(required *physical.Required) bool { - return required.Ordering.Any() + return required.Ordering.Any() && required.Distribution.Any() } // setLowestCostTree traverses the memo and recursively updates child pointers @@ -711,6 +718,7 @@ func (o *Optimizer) setLowestCostTree(parent opt.Expr, parentProps *physical.Req // BuildProvided relies on ProvidedPhysical() being set in the children, so // it must run after the recursive calls on the children. provided.Ordering = ordering.BuildProvided(relParent, &parentProps.Ordering) + provided.Distribution = distribution.BuildProvided(o.evalCtx, relParent, &parentProps.Distribution) o.mem.SetBestProps(relParent, parentProps, &provided, relCost) } diff --git a/pkg/sql/opt/xform/physical_props.go b/pkg/sql/opt/xform/physical_props.go index 6afaab5722e2..ee25b56d2d70 100644 --- a/pkg/sql/opt/xform/physical_props.go +++ b/pkg/sql/opt/xform/physical_props.go @@ -14,6 +14,7 @@ import ( "math" "github.com/cockroachdb/cockroach/pkg/sql/opt" + "github.com/cockroachdb/cockroach/pkg/sql/opt/distribution" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/opt/ordering" "github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical" @@ -31,10 +32,14 @@ import ( // Operators that do this should return true from the appropriate canProvide // method and then pass through that property in the buildChildPhysicalProps // method. -func CanProvidePhysicalProps(e memo.RelExpr, required *physical.Required) bool { +func CanProvidePhysicalProps( + evalCtx *tree.EvalContext, e memo.RelExpr, required *physical.Required, +) bool { // All operators can provide the Presentation and LimitHint properties, so no // need to check for that. 
- return e.Op() == opt.SortOp || ordering.CanProvide(e, &required.Ordering) + canProvideOrdering := e.Op() == opt.SortOp || ordering.CanProvide(e, &required.Ordering) + canProvideDistribution := e.Op() == opt.DistributeOp || distribution.CanProvide(evalCtx, e, &required.Distribution) + return canProvideOrdering && canProvideDistribution } // BuildChildPhysicalProps returns the set of physical properties required of @@ -80,6 +85,7 @@ func BuildChildPhysicalProps( } childProps.Ordering = ordering.BuildChildRequired(parent, &parentProps.Ordering, nth) + childProps.Distribution = distribution.BuildChildRequired(parent, &parentProps.Distribution, nth) switch parent.Op() { case opt.LimitOp: diff --git a/pkg/sql/opt/xform/testdata/coster/zone b/pkg/sql/opt/xform/testdata/coster/zone index b60fd84b51ee..e951327cd8ca 100644 --- a/pkg/sql/opt/xform/testdata/coster/zone +++ b/pkg/sql/opt/xform/testdata/coster/zone @@ -43,6 +43,7 @@ scan t.public.abc ├── cost: 1084.62 ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: central ├── prune: (1-3) └── interesting orderings: (+1) (+2,+3,+1) @@ -50,15 +51,25 @@ scan t.public.abc opt format=show-all locality=(region=central) SELECT * FROM abc WHERE b=10 ---- -scan t.public.abc@bc1 +distribute ├── columns: a:1(int!null) b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 25.22 + ├── cost: 25.25 ├── key: (1) ├── fd: ()-->(2), (1)-->(3), (2,3)~~>(1) + ├── distribution: central + ├── input distribution: east ├── prune: (1,3) - └── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) + ├── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) + └── scan t.public.abc@bc1 + ├── columns: t.public.abc.a:1(int!null) t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 25.22 + ├── key: (1) + ├── fd: ()-->(2), (1)-->(3), (2,3)~~>(1) + ├── prune: (1,3) + └── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) # With locality in east, use bc1 index. opt format=show-all locality=(region=east) @@ -71,6 +82,7 @@ scan t.public.abc@bc1 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: east ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -85,6 +97,7 @@ scan t.public.abc@bc2 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: west ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -106,15 +119,25 @@ scan t.public.abc@bc1 opt format=show-all locality=(region=central) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc1 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 25.02 + ├── cost: 25.05 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: central + ├── input distribution: east ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc1 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 25.02 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) # -------------------------------------------------- # Multiple constraints. 
@@ -143,6 +166,7 @@ scan t.public.abc ├── cost: 1084.62 ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: us ├── prune: (1-3) └── interesting orderings: (+1) (+2,+3,+1) @@ -157,6 +181,7 @@ scan t.public.abc@bc1 ├── cost: 24.77 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -171,6 +196,7 @@ scan t.public.abc@bc1 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -186,6 +212,7 @@ scan t.public.abc@bc2 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -213,6 +240,7 @@ scan t.public.abc@bc1 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -228,6 +256,7 @@ scan t.public.abc@bc2 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -247,59 +276,99 @@ ALTER INDEX abc@bc2 CONFIGURE ZONE USING constraints='[+region=eu,+region=us,+dc opt format=show-all locality=(region=us) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc1 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 24.52 + ├── cost: 24.55 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us + ├── input distribution: ap,us ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc1 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 24.52 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) # With locality in eu, use bc2, since it's prohibited with bc1. opt format=show-all locality=(region=eu) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc2 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 24.52 + ├── cost: 24.55 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: eu + ├── input distribution: eu,us ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc2 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 24.52 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) # With locality in us + east, use bc2, since it matches both tiers, even though # "us" match is after "eu" in list. 
opt format=show-all locality=(region=us,dc=east) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc2 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 24.52 + ├── cost: 24.55 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us + ├── input distribution: eu,us ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc2 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 24.52 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) # With locality in ap + east, use bc1, since ap is not in list of regions for # bc2, even though dc=east matches. opt format=show-all locality=(region=ap,dc=east) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc1 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, distinct(2)=1, null(2)=0] - ├── cost: 24.77 + ├── cost: 24.8 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: ap + ├── input distribution: ap,us ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc1 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 24.77 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) exec-ddl ALTER INDEX abc@bc1 CONFIGURE ZONE USING constraints='[-region=eu,+dc=east]' @@ -320,6 +389,7 @@ scan t.public.abc@bc1 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -334,6 +404,7 @@ scan t.public.abc@bc2 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: eu ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -370,6 +441,7 @@ inner-join (lookup t.public.xy@y2) ├── cost: 427.6 ├── key: (1,6) ├── fd: ()-->(2,7), (1)-->(3), (2,3)~~>(1), (2)==(7), (7)==(2) + ├── distribution: us ├── prune: (1,3,6) ├── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) (+6 opt(7)) ├── scan t.public.abc@bc2 @@ -379,6 +451,7 @@ inner-join (lookup t.public.xy@y2) │ ├── cost: 24.62 │ ├── key: (1) │ ├── fd: ()-->(2), (1)-->(3), (2,3)~~>(1) + │ ├── distribution: us │ ├── prune: (1,3) │ └── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) └── filters @@ -408,6 +481,7 @@ inner-join (lookup t.public.xy@y1) ├── cost: 427.6 ├── key: (1,6) ├── fd: ()-->(2,7), (1)-->(3), (2,3)~~>(1), (2)==(7), (7)==(2) + ├── distribution: us ├── prune: (1,3,6) ├── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) (+6 opt(7)) ├── scan t.public.abc@bc2 @@ -417,6 +491,7 @@ inner-join (lookup t.public.xy@y1) │ ├── cost: 24.62 │ ├── key: (1) │ ├── fd: ()-->(2), (1)-->(3), (2,3)~~>(1) + │ ├── distribution: us │ ├── prune: (1,3) │ └── interesting orderings: (+1 opt(2)) (+3,+1 opt(2)) └── filters @@ -450,6 +525,7 @@ scan t.public.abc ├── cost: 1125.02 ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: central ├── prune: (1-3) └── interesting orderings: (+1) (+2,+3,+1) @@ -457,15 +533,25 @@ scan t.public.abc opt format=show-all locality=(region=central) SELECT b, c FROM abc WHERE b=10 ---- -scan t.public.abc@bc1 +distribute ├── columns: b:2(int!null) c:3(string) - ├── constraint: /2/3: [/10 - /10] ├── stats: [rows=10, 
distinct(2)=1, null(2)=0] - ├── cost: 25.02 + ├── cost: 25.05 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: central + ├── input distribution: east ├── prune: (3) - └── interesting orderings: (+3 opt(2)) + ├── interesting orderings: (+3 opt(2)) + └── scan t.public.abc@bc1 + ├── columns: t.public.abc.b:2(int!null) t.public.abc.c:3(string) + ├── constraint: /2/3: [/10 - /10] + ├── stats: [rows=10, distinct(2)=1, null(2)=0] + ├── cost: 25.02 + ├── lax-key: (3) + ├── fd: ()-->(2) + ├── prune: (3) + └── interesting orderings: (+3 opt(2)) # With locality in east, use bc1 index. opt format=show-all locality=(region=east) @@ -478,6 +564,7 @@ scan t.public.abc@bc1 ├── cost: 24.8533333 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: east ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -492,6 +579,7 @@ scan t.public.abc@bc2 ├── cost: 24.8533333 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: west ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -521,6 +609,7 @@ scan t.public.abc ├── cost: 1125.02 ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: us ├── prune: (1-3) └── interesting orderings: (+1) (+2,+3,+1) @@ -535,6 +624,7 @@ scan t.public.abc@bc1 ├── cost: 24.9366667 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -549,6 +639,7 @@ scan t.public.abc@bc1 ├── cost: 24.8533333 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -563,6 +654,7 @@ scan t.public.abc@bc2 ├── cost: 24.8533333 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -595,6 +687,7 @@ scan t.public.abc ├── cost: 1104.82 ├── key: (1) ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: us ├── prune: (1-3) └── interesting orderings: (+1) (+2,+3,+1) @@ -609,6 +702,7 @@ scan t.public.abc@bc1 ├── cost: 24.77 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -623,6 +717,7 @@ scan t.public.abc@bc1 ├── cost: 24.6866667 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -637,6 +732,7 @@ scan t.public.abc@bc2 ├── cost: 24.6866667 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -666,6 +762,7 @@ scan t.public.abc@bc2 ├── cost: 24.52 ├── lax-key: (3) ├── fd: ()-->(2) + ├── distribution: us ├── prune: (3) └── interesting orderings: (+3 opt(2)) @@ -718,6 +815,7 @@ locality-optimized-search ├── cost: 3.468444 ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── prune: (1,2) ├── scan t.public.abc_part@bc_idx │ ├── columns: t.public.abc_part.r:7(string!null) t.public.abc_part.a:8(int!null) t.public.abc_part.b:9(int!null) t.public.abc_part.c:10(string!null) @@ -755,6 +853,7 @@ anti-join (lookup abc_part@bc_idx [as=a2]) ├── cost: 18.1443257 ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part@bc_idx [as=a2]) │ ├── columns: a1.r:1!null a1.a:2!null a1.b:3!null a1.c:4!null │ ├── lookup expression @@ -766,6 +865,7 @@ anti-join (lookup abc_part@bc_idx [as=a2]) │ ├── cost: 10.8432087 │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: a1.r:1!null a1.a:2!null a1.b:3!null a1.c:4!null │ │ ├── left columns: a1.r:13 a1.a:14 a1.b:15 a1.c:16 @@ -775,6 +875,7 @@ anti-join (lookup abc_part@bc_idx [as=a2]) │ │ ├── cost: 3.468444 │ │ ├── key: 
() │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan abc_part@bc_idx [as=a1] │ │ │ ├── columns: a1.r:13!null a1.a:14!null a1.b:15!null a1.c:16!null │ │ │ ├── constraint: /13/15/16: [/'east'/1/'foo' - /'east'/1/'foo'] diff --git a/pkg/sql/opt/xform/testdata/external/tpce b/pkg/sql/opt/xform/testdata/external/tpce index 0b17af9fe311..ae2c7a5cb80c 100644 --- a/pkg/sql/opt/xform/testdata/external/tpce +++ b/pkg/sql/opt/xform/testdata/external/tpce @@ -6181,12 +6181,12 @@ upsert trade_history ├── upsert-mapping: │ ├── column1:6 => trade_history.th_t_id:1 │ ├── column2:7 => th_dts:2 - │ └── column3:8 => trade_history.th_st_id:3 + │ └── th_st_id_cast:9 => trade_history.th_st_id:3 ├── input binding: &1 ├── cardinality: [0 - 0] ├── volatile, mutations ├── values - │ ├── columns: column1:6!null column2:7!null column3:8!null + │ ├── columns: column1:6!null column2:7!null th_st_id_cast:9!null │ ├── cardinality: [3 - 3] │ ├── (0, '2020-06-17 22:27:42.148484', 'SBMT') │ ├── (0, '2020-06-20 22:27:42.148484', 'PNDG') @@ -6194,26 +6194,26 @@ upsert trade_history └── f-k-checks ├── f-k-checks-item: trade_history(th_t_id) -> trade(t_id) │ └── anti-join (lookup trade) - │ ├── columns: th_t_id:9!null - │ ├── key columns: [9] = [10] + │ ├── columns: th_t_id:10!null + │ ├── key columns: [10] = [11] │ ├── lookup columns are key │ ├── cardinality: [0 - 3] │ ├── with-scan &1 - │ │ ├── columns: th_t_id:9!null + │ │ ├── columns: th_t_id:10!null │ │ ├── mapping: - │ │ │ └── column1:6 => th_t_id:9 + │ │ │ └── column1:6 => th_t_id:10 │ │ └── cardinality: [3 - 3] │ └── filters (true) └── f-k-checks-item: trade_history(th_st_id) -> status_type(st_id) └── anti-join (lookup status_type) - ├── columns: th_st_id:27!null - ├── key columns: [27] = [28] + ├── columns: th_st_id:28!null + ├── key columns: [28] = [29] ├── lookup columns are key ├── cardinality: [0 - 3] ├── with-scan &1 - │ ├── columns: th_st_id:27!null + │ ├── columns: th_st_id:28!null │ ├── mapping: - │ │ └── column3:8 => th_st_id:27 + │ │ └── th_st_id_cast:9 => th_st_id:28 │ └── cardinality: [3 - 3] └── filters (true) diff --git a/pkg/sql/opt/xform/testdata/external/tpce-no-stats b/pkg/sql/opt/xform/testdata/external/tpce-no-stats index d12c296839eb..bcd1815a077f 100644 --- a/pkg/sql/opt/xform/testdata/external/tpce-no-stats +++ b/pkg/sql/opt/xform/testdata/external/tpce-no-stats @@ -6204,12 +6204,12 @@ upsert trade_history ├── upsert-mapping: │ ├── column1:6 => trade_history.th_t_id:1 │ ├── column2:7 => th_dts:2 - │ └── column3:8 => trade_history.th_st_id:3 + │ └── th_st_id_cast:9 => trade_history.th_st_id:3 ├── input binding: &1 ├── cardinality: [0 - 0] ├── volatile, mutations ├── values - │ ├── columns: column1:6!null column2:7!null column3:8!null + │ ├── columns: column1:6!null column2:7!null th_st_id_cast:9!null │ ├── cardinality: [3 - 3] │ ├── (0, '2020-06-17 22:27:42.148484', 'SBMT') │ ├── (0, '2020-06-20 22:27:42.148484', 'PNDG') @@ -6217,26 +6217,26 @@ upsert trade_history └── f-k-checks ├── f-k-checks-item: trade_history(th_t_id) -> trade(t_id) │ └── anti-join (lookup trade) - │ ├── columns: th_t_id:9!null - │ ├── key columns: [9] = [10] + │ ├── columns: th_t_id:10!null + │ ├── key columns: [10] = [11] │ ├── lookup columns are key │ ├── cardinality: [0 - 3] │ ├── with-scan &1 - │ │ ├── columns: th_t_id:9!null + │ │ ├── columns: th_t_id:10!null │ │ ├── mapping: - │ │ │ └── column1:6 => th_t_id:9 + │ │ │ └── column1:6 => th_t_id:10 │ │ └── cardinality: [3 - 3] │ └── filters (true) └── f-k-checks-item: trade_history(th_st_id) -> 
status_type(st_id) └── anti-join (lookup status_type) - ├── columns: th_st_id:27!null - ├── key columns: [27] = [28] + ├── columns: th_st_id:28!null + ├── key columns: [28] = [29] ├── lookup columns are key ├── cardinality: [0 - 3] ├── with-scan &1 - │ ├── columns: th_st_id:27!null + │ ├── columns: th_st_id:28!null │ ├── mapping: - │ │ └── column3:8 => th_st_id:27 + │ │ └── th_st_id_cast:9 => th_st_id:28 │ └── cardinality: [3 - 3] └── filters (true) diff --git a/pkg/sql/opt/xform/testdata/external/trading b/pkg/sql/opt/xform/testdata/external/trading index ca67b237c730..ea28e3222e2a 100644 --- a/pkg/sql/opt/xform/testdata/external/trading +++ b/pkg/sql/opt/xform/testdata/external/trading @@ -714,7 +714,7 @@ project │ │ ├── stats: [rows=478.646617] │ │ ├── key: (3,5) │ │ ├── fd: ()-->(1,2,4,20,21), (3,5)-->(6,7) - │ │ ├── ordering: -3 opt(1,2,4) [actual: -3] + │ │ ├── ordering: -3 opt(1,2,4,20,21) [actual: -3] │ │ ├── limit hint: 100.00 │ │ ├── index-join transactiondetails │ │ │ ├── columns: transactiondetails.dealerid:1!null transactiondetails.isbuy:2!null transactiondate:3!null cardid:4!null quantity:5!null sellprice:6!null buyprice:7!null @@ -848,7 +848,7 @@ project │ │ │ │ ├── stats: [rows=19000] │ │ │ │ ├── key: (1) │ │ │ │ ├── fd: ()-->(42,43), (1)-->(2-6), (2,4,5)~~>(1,3,6) - │ │ │ │ ├── ordering: +1 + │ │ │ │ ├── ordering: +1 opt(42,43) [actual: +1] │ │ │ │ ├── select │ │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null │ │ │ │ │ ├── immutable @@ -1267,42 +1267,42 @@ VALUES (1, FALSE, '2020-03-01', 'the-account', 'the-customer', '70F03EB1-4F58-4C upsert transactions ├── columns: ├── arbiter indexes: transactionsprimarykey - ├── canary column: dealerid:17 - ├── fetch columns: dealerid:17 isbuy:18 date:19 accountname:20 customername:21 operationid:22 version:23 + ├── canary column: dealerid:19 + ├── fetch columns: dealerid:19 isbuy:20 date:21 accountname:22 customername:23 operationid:24 version:25 ├── insert-mapping: │ ├── column1:10 => dealerid:1 │ ├── column2:11 => isbuy:2 │ ├── column3:12 => date:3 - │ ├── column4:13 => accountname:4 - │ ├── column5:14 => customername:5 + │ ├── accountname_cast:16 => accountname:4 + │ ├── customername_cast:17 => customername:5 │ ├── column6:15 => operationid:6 - │ └── version_default:16 => version:7 + │ └── version_default:18 => version:7 ├── update-mapping: - │ ├── column4:13 => accountname:4 - │ ├── column5:14 => customername:5 + │ ├── accountname_cast:16 => accountname:4 + │ ├── customername_cast:17 => customername:5 │ └── column6:15 => operationid:6 ├── cardinality: [0 - 0] ├── volatile, mutations └── left-join (cross) - ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null column5:14!null column6:15!null version_default:16 dealerid:17 isbuy:18 date:19 accountname:20 customername:21 operationid:22 version:23 + ├── columns: column1:10!null column2:11!null column3:12!null column6:15!null accountname_cast:16!null customername_cast:17!null version_default:18 dealerid:19 isbuy:20 date:21 accountname:22 customername:23 operationid:24 version:25 ├── cardinality: [1 - 1] ├── multiplicity: left-rows(exactly-one), right-rows(exactly-one) ├── volatile ├── key: () - ├── fd: ()-->(10-23) + ├── fd: ()-->(10-12,15-25) ├── values - │ ├── columns: column1:10!null column2:11!null column3:12!null column4:13!null column5:14!null column6:15!null version_default:16 + │ ├── columns: column1:10!null column2:11!null column3:12!null column6:15!null accountname_cast:16!null customername_cast:17!null 
version_default:18 │ ├── cardinality: [1 - 1] │ ├── volatile │ ├── key: () - │ ├── fd: ()-->(10-16) - │ └── (1, false, '2020-03-01 00:00:00+00:00', 'the-account', 'the-customer', '70f03eb1-4f58-4c26-b72d-c524a9d537dd', cluster_logical_timestamp()) + │ ├── fd: ()-->(10-12,15-18) + │ └── (1, false, '2020-03-01 00:00:00+00:00', '70f03eb1-4f58-4c26-b72d-c524a9d537dd', 'the-account', 'the-customer', cluster_logical_timestamp()) ├── scan transactions - │ ├── columns: dealerid:17!null isbuy:18!null date:19!null accountname:20!null customername:21!null operationid:22 version:23!null - │ ├── constraint: /17/18/19: [/1/false/'2020-03-01 00:00:00+00:00' - /1/false/'2020-03-01 00:00:00+00:00'] + │ ├── columns: dealerid:19!null isbuy:20!null date:21!null accountname:22!null customername:23!null operationid:24 version:25!null + │ ├── constraint: /19/20/21: [/1/false/'2020-03-01 00:00:00+00:00' - /1/false/'2020-03-01 00:00:00+00:00'] │ ├── cardinality: [0 - 1] │ ├── key: () - │ └── fd: ()-->(17-23) + │ └── fd: ()-->(19-25) └── filters (true) # Insert structured data (c=CardId, q=Quantity, s=SellPrice, b=BuyPrice) @@ -1323,69 +1323,69 @@ FROM updates upsert transactiondetails ├── columns: ├── arbiter indexes: detailsprimarykey - ├── canary column: transactiondetails.dealerid:23 - ├── fetch columns: transactiondetails.dealerid:23 transactiondetails.isbuy:24 transactiondetails.transactiondate:25 transactiondetails.cardid:26 quantity:27 transactiondetails.sellprice:28 transactiondetails.buyprice:29 transactiondetails.version:30 + ├── canary column: transactiondetails.dealerid:21 + ├── fetch columns: transactiondetails.dealerid:21 transactiondetails.isbuy:22 transactiondetails.transactiondate:23 transactiondetails.cardid:24 quantity:25 sellprice:26 buyprice:27 transactiondetails.version:28 ├── insert-mapping: │ ├── "?column?":13 => transactiondetails.dealerid:2 │ ├── bool:14 => transactiondetails.isbuy:3 │ ├── current_timestamp:15 => transactiondetails.transactiondate:4 │ ├── int8:16 => transactiondetails.cardid:5 │ ├── int8:17 => quantity:6 - │ ├── sellprice:21 => transactiondetails.sellprice:7 - │ ├── buyprice:22 => transactiondetails.buyprice:8 + │ ├── numeric:18 => sellprice:7 + │ ├── numeric:19 => buyprice:8 │ └── version_default:20 => transactiondetails.version:9 ├── update-mapping: - │ ├── sellprice:21 => transactiondetails.sellprice:7 - │ └── buyprice:22 => transactiondetails.buyprice:8 + │ ├── numeric:18 => sellprice:7 + │ └── numeric:19 => buyprice:8 ├── input binding: &2 ├── cardinality: [0 - 0] ├── volatile, mutations ├── project - │ ├── columns: upsert_dealerid:33 upsert_isbuy:34 upsert_transactiondate:35 upsert_cardid:36 "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null version_default:20 sellprice:21 buyprice:22 transactiondetails.dealerid:23 transactiondetails.isbuy:24 transactiondetails.transactiondate:25 transactiondetails.cardid:26 quantity:27 transactiondetails.sellprice:28 transactiondetails.buyprice:29 transactiondetails.version:30 + │ ├── columns: upsert_dealerid:31 upsert_isbuy:32 upsert_transactiondate:33 upsert_cardid:34 "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null numeric:18!null numeric:19!null version_default:20 transactiondetails.dealerid:21 transactiondetails.isbuy:22 transactiondetails.transactiondate:23 transactiondetails.cardid:24 quantity:25 sellprice:26 buyprice:27 transactiondetails.version:28 │ ├── cardinality: [1 - 2] │ ├── volatile │ ├── key: (16,17) - │ ├── fd: ()-->(13-15), (16,17)-->(20-30), 
(23-27)-->(28-30), (23)-->(33), (23,24)-->(34), (23,25)-->(35), (16,23,26)-->(36) + │ ├── fd: ()-->(13-15), (16,17)-->(18-28), (21-25)-->(26-28), (21)-->(31), (21,22)-->(32), (21,23)-->(33), (16,21,24)-->(34) │ ├── left-join (lookup transactiondetails) - │ │ ├── columns: "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null version_default:20 sellprice:21 buyprice:22 transactiondetails.dealerid:23 transactiondetails.isbuy:24 transactiondetails.transactiondate:25 transactiondetails.cardid:26 quantity:27 transactiondetails.sellprice:28 transactiondetails.buyprice:29 transactiondetails.version:30 - │ │ ├── key columns: [13 14 15 16 17] = [23 24 25 26 27] + │ │ ├── columns: "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null numeric:18!null numeric:19!null version_default:20 transactiondetails.dealerid:21 transactiondetails.isbuy:22 transactiondetails.transactiondate:23 transactiondetails.cardid:24 quantity:25 sellprice:26 buyprice:27 transactiondetails.version:28 + │ │ ├── key columns: [13 14 15 16 17] = [21 22 23 24 25] │ │ ├── lookup columns are key │ │ ├── cardinality: [1 - 2] │ │ ├── volatile │ │ ├── key: (16,17) - │ │ ├── fd: ()-->(13-15), (16,17)-->(20-30), (23-27)-->(28-30) + │ │ ├── fd: ()-->(13-15), (16,17)-->(18-28), (21-25)-->(26-28) │ │ ├── ensure-upsert-distinct-on - │ │ │ ├── columns: "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null version_default:20 sellprice:21 buyprice:22 + │ │ │ ├── columns: "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null numeric:18!null numeric:19!null version_default:20 │ │ │ ├── grouping columns: int8:16!null int8:17!null │ │ │ ├── error: "UPSERT or INSERT...ON CONFLICT command cannot affect row a second time" │ │ │ ├── cardinality: [1 - 2] │ │ │ ├── volatile │ │ │ ├── key: (16,17) - │ │ │ ├── fd: ()-->(13-15), (16,17)-->(13-15,20-22) + │ │ │ ├── fd: ()-->(13-15), (16,17)-->(13-15,18-20) │ │ │ ├── project - │ │ │ │ ├── columns: sellprice:21 buyprice:22 version_default:20 "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null + │ │ │ │ ├── columns: version_default:20 "?column?":13!null bool:14!null current_timestamp:15!null int8:16!null int8:17!null numeric:18!null numeric:19!null │ │ │ │ ├── cardinality: [2 - 2] │ │ │ │ ├── volatile │ │ │ │ ├── fd: ()-->(13-15) │ │ │ │ ├── values - │ │ │ │ │ ├── columns: detail_b:60!null detail_c:61!null detail_q:62!null detail_s:63!null + │ │ │ │ │ ├── columns: detail_b:58!null detail_c:59!null detail_q:60!null detail_s:61!null │ │ │ │ │ ├── cardinality: [2 - 2] │ │ │ │ │ ├── ('2.29', '49833', '4', '2.89') │ │ │ │ │ └── ('17.59', '29483', '2', '18.93') │ │ │ │ └── projections - │ │ │ │ ├── crdb_internal.round_decimal_values(detail_s:63::STRING::DECIMAL(10,4), 4) [as=sellprice:21, outer=(63), immutable] - │ │ │ │ ├── crdb_internal.round_decimal_values(detail_b:60::STRING::DECIMAL(10,4), 4) [as=buyprice:22, outer=(60), immutable] │ │ │ │ ├── cluster_logical_timestamp() [as=version_default:20, volatile] │ │ │ │ ├── 1 [as="?column?":13] │ │ │ │ ├── false [as=bool:14] │ │ │ │ ├── '2017-05-10 13:00:00+00:00' [as=current_timestamp:15] - │ │ │ │ ├── detail_c:61::STRING::INT8 [as=int8:16, outer=(61), immutable] - │ │ │ │ └── detail_q:62::STRING::INT8 [as=int8:17, outer=(62), immutable] + │ │ │ │ ├── detail_c:59::STRING::INT8 [as=int8:16, outer=(59), immutable] + │ │ │ │ ├── detail_q:60::STRING::INT8 [as=int8:17, outer=(60), immutable] + │ │ │ │ ├── 
detail_s:61::STRING::DECIMAL(10,4) [as=numeric:18, outer=(61), immutable] + │ │ │ │ └── detail_b:58::STRING::DECIMAL(10,4) [as=numeric:19, outer=(58), immutable] │ │ │ └── aggregations - │ │ │ ├── first-agg [as=sellprice:21, outer=(21)] - │ │ │ │ └── sellprice:21 - │ │ │ ├── first-agg [as=buyprice:22, outer=(22)] - │ │ │ │ └── buyprice:22 + │ │ │ ├── first-agg [as=numeric:18, outer=(18)] + │ │ │ │ └── numeric:18 + │ │ │ ├── first-agg [as=numeric:19, outer=(19)] + │ │ │ │ └── numeric:19 │ │ │ ├── first-agg [as=version_default:20, outer=(20)] │ │ │ │ └── version_default:20 │ │ │ ├── const-agg [as="?column?":13, outer=(13)] @@ -1396,35 +1396,35 @@ upsert transactiondetails │ │ │ └── current_timestamp:15 │ │ └── filters (true) │ └── projections - │ ├── CASE WHEN transactiondetails.dealerid:23 IS NULL THEN "?column?":13 ELSE transactiondetails.dealerid:23 END [as=upsert_dealerid:33, outer=(13,23)] - │ ├── CASE WHEN transactiondetails.dealerid:23 IS NULL THEN bool:14 ELSE transactiondetails.isbuy:24 END [as=upsert_isbuy:34, outer=(14,23,24)] - │ ├── CASE WHEN transactiondetails.dealerid:23 IS NULL THEN current_timestamp:15 ELSE transactiondetails.transactiondate:25 END [as=upsert_transactiondate:35, outer=(15,23,25)] - │ └── CASE WHEN transactiondetails.dealerid:23 IS NULL THEN int8:16 ELSE transactiondetails.cardid:26 END [as=upsert_cardid:36, outer=(16,23,26)] + │ ├── CASE WHEN transactiondetails.dealerid:21 IS NULL THEN "?column?":13 ELSE transactiondetails.dealerid:21 END [as=upsert_dealerid:31, outer=(13,21)] + │ ├── CASE WHEN transactiondetails.dealerid:21 IS NULL THEN bool:14 ELSE transactiondetails.isbuy:22 END [as=upsert_isbuy:32, outer=(14,21,22)] + │ ├── CASE WHEN transactiondetails.dealerid:21 IS NULL THEN current_timestamp:15 ELSE transactiondetails.transactiondate:23 END [as=upsert_transactiondate:33, outer=(15,21,23)] + │ └── CASE WHEN transactiondetails.dealerid:21 IS NULL THEN int8:16 ELSE transactiondetails.cardid:24 END [as=upsert_cardid:34, outer=(16,21,24)] └── f-k-checks ├── f-k-checks-item: transactiondetails(dealerid,isbuy,transactiondate) -> transactions(dealerid,isbuy,date) │ └── anti-join (lookup transactions) - │ ├── columns: dealerid:39 isbuy:40 transactiondate:41 - │ ├── key columns: [39 40 41] = [42 43 44] + │ ├── columns: dealerid:37 isbuy:38 transactiondate:39 + │ ├── key columns: [37 38 39] = [40 41 42] │ ├── lookup columns are key │ ├── cardinality: [0 - 2] │ ├── with-scan &2 - │ │ ├── columns: dealerid:39 isbuy:40 transactiondate:41 + │ │ ├── columns: dealerid:37 isbuy:38 transactiondate:39 │ │ ├── mapping: - │ │ │ ├── upsert_dealerid:33 => dealerid:39 - │ │ │ ├── upsert_isbuy:34 => isbuy:40 - │ │ │ └── upsert_transactiondate:35 => transactiondate:41 + │ │ │ ├── upsert_dealerid:31 => dealerid:37 + │ │ │ ├── upsert_isbuy:32 => isbuy:38 + │ │ │ └── upsert_transactiondate:33 => transactiondate:39 │ │ └── cardinality: [1 - 2] │ └── filters (true) └── f-k-checks-item: transactiondetails(cardid) -> cards(id) └── anti-join (lookup cards) - ├── columns: cardid:51 - ├── key columns: [51] = [52] + ├── columns: cardid:49 + ├── key columns: [49] = [50] ├── lookup columns are key ├── cardinality: [0 - 2] ├── with-scan &2 - │ ├── columns: cardid:51 + │ ├── columns: cardid:49 │ ├── mapping: - │ │ └── upsert_cardid:36 => cardid:51 + │ │ └── upsert_cardid:34 => cardid:49 │ └── cardinality: [1 - 2] └── filters (true) diff --git a/pkg/sql/opt/xform/testdata/external/trading-mutation b/pkg/sql/opt/xform/testdata/external/trading-mutation index 1a04ce2ec3d7..005cf25428fb 100644 
--- a/pkg/sql/opt/xform/testdata/external/trading-mutation +++ b/pkg/sql/opt/xform/testdata/external/trading-mutation @@ -718,7 +718,7 @@ project │ │ ├── stats: [rows=478.646617] │ │ ├── key: (3,5) │ │ ├── fd: ()-->(1,2,4,24,25), (3,5)-->(6,7) - │ │ ├── ordering: -3 opt(1,2,4) [actual: -3] + │ │ ├── ordering: -3 opt(1,2,4,24,25) [actual: -3] │ │ ├── limit hint: 100.00 │ │ ├── index-join transactiondetails │ │ │ ├── columns: transactiondetails.dealerid:1!null transactiondetails.isbuy:2!null transactiondate:3!null cardid:4!null quantity:5!null sellprice:6!null buyprice:7!null @@ -852,7 +852,7 @@ project │ │ │ │ ├── stats: [rows=19000] │ │ │ │ ├── key: (1) │ │ │ │ ├── fd: ()-->(48,49), (1)-->(2-6), (2,4,5)~~>(1,3,6) - │ │ │ │ ├── ordering: +1 + │ │ │ │ ├── ordering: +1 opt(48,49) [actual: +1] │ │ │ │ ├── select │ │ │ │ │ ├── columns: id:1!null name:2!null rarity:3 setname:4 number:5!null isfoil:6!null │ │ │ │ │ ├── immutable @@ -1272,44 +1272,44 @@ VALUES (1, FALSE, '2020-03-01', 'the-account', 'the-customer', '70F03EB1-4F58-4C upsert transactions ├── columns: ├── arbiter indexes: transactionsprimarykey - ├── canary column: dealerid:20 - ├── fetch columns: dealerid:20 isbuy:21 date:22 accountname:23 customername:24 operationid:25 version:26 olddate:27 extra:28 + ├── canary column: dealerid:22 + ├── fetch columns: dealerid:22 isbuy:23 date:24 accountname:25 customername:26 operationid:27 version:28 olddate:29 extra:30 ├── insert-mapping: │ ├── column1:12 => dealerid:1 │ ├── column2:13 => isbuy:2 │ ├── column3:14 => date:3 - │ ├── column4:15 => accountname:4 - │ ├── column5:16 => customername:5 + │ ├── accountname_cast:18 => accountname:4 + │ ├── customername_cast:19 => customername:5 │ ├── column6:17 => operationid:6 - │ ├── version_default:18 => version:7 - │ └── olddate_default:19 => olddate:8 + │ ├── version_default:20 => version:7 + │ └── olddate_default:21 => olddate:8 ├── update-mapping: - │ ├── column4:15 => accountname:4 - │ ├── column5:16 => customername:5 + │ ├── accountname_cast:18 => accountname:4 + │ ├── customername_cast:19 => customername:5 │ ├── column6:17 => operationid:6 - │ └── olddate_default:19 => olddate:8 + │ └── olddate_default:21 => olddate:8 ├── cardinality: [0 - 0] ├── volatile, mutations └── left-join (cross) - ├── columns: column1:12!null column2:13!null column3:14!null column4:15!null column5:16!null column6:17!null version_default:18 olddate_default:19!null dealerid:20 isbuy:21 date:22 accountname:23 customername:24 operationid:25 version:26 olddate:27 extra:28 + ├── columns: column1:12!null column2:13!null column3:14!null column6:17!null accountname_cast:18!null customername_cast:19!null version_default:20 olddate_default:21!null dealerid:22 isbuy:23 date:24 accountname:25 customername:26 operationid:27 version:28 olddate:29 extra:30 ├── cardinality: [1 - 1] ├── multiplicity: left-rows(exactly-one), right-rows(exactly-one) ├── volatile ├── key: () - ├── fd: ()-->(12-28) + ├── fd: ()-->(12-14,17-30) ├── values - │ ├── columns: column1:12!null column2:13!null column3:14!null column4:15!null column5:16!null column6:17!null version_default:18 olddate_default:19!null + │ ├── columns: column1:12!null column2:13!null column3:14!null column6:17!null accountname_cast:18!null customername_cast:19!null version_default:20 olddate_default:21!null │ ├── cardinality: [1 - 1] │ ├── volatile │ ├── key: () - │ ├── fd: ()-->(12-19) - │ └── (1, false, '2020-03-01 00:00:00+00:00', 'the-account', 'the-customer', '70f03eb1-4f58-4c26-b72d-c524a9d537dd', cluster_logical_timestamp(), 
'0001-01-01 00:00:00') + │ ├── fd: ()-->(12-14,17-21) + │ └── (1, false, '2020-03-01 00:00:00+00:00', '70f03eb1-4f58-4c26-b72d-c524a9d537dd', 'the-account', 'the-customer', cluster_logical_timestamp(), '0001-01-01 00:00:00') ├── scan transactions - │ ├── columns: dealerid:20!null isbuy:21!null date:22!null accountname:23!null customername:24!null operationid:25 version:26!null olddate:27 extra:28 - │ ├── constraint: /20/21/22: [/1/false/'2020-03-01 00:00:00+00:00' - /1/false/'2020-03-01 00:00:00+00:00'] + │ ├── columns: dealerid:22!null isbuy:23!null date:24!null accountname:25!null customername:26!null operationid:27 version:28!null olddate:29 extra:30 + │ ├── constraint: /22/23/24: [/1/false/'2020-03-01 00:00:00+00:00' - /1/false/'2020-03-01 00:00:00+00:00'] │ ├── cardinality: [0 - 1] │ ├── key: () - │ └── fd: ()-->(20-28) + │ └── fd: ()-->(22-30) └── filters (true) # Insert structured data (c=CardId, q=Quantity, s=SellPrice, b=BuyPrice) @@ -1330,76 +1330,76 @@ FROM updates upsert transactiondetails ├── columns: ├── arbiter indexes: detailsprimarykey - ├── canary column: transactiondetails.dealerid:27 - ├── fetch columns: transactiondetails.dealerid:27 transactiondetails.isbuy:28 transactiondetails.transactiondate:29 transactiondetails.cardid:30 quantity:31 transactiondetails.sellprice:32 transactiondetails.buyprice:33 transactiondetails.version:34 discount:35 transactiondetails.extra:36 + ├── canary column: transactiondetails.dealerid:25 + ├── fetch columns: transactiondetails.dealerid:25 transactiondetails.isbuy:26 transactiondetails.transactiondate:27 transactiondetails.cardid:28 quantity:29 sellprice:30 buyprice:31 transactiondetails.version:32 discount:33 transactiondetails.extra:34 ├── insert-mapping: │ ├── "?column?":15 => transactiondetails.dealerid:2 │ ├── bool:16 => transactiondetails.isbuy:3 │ ├── current_timestamp:17 => transactiondetails.transactiondate:4 │ ├── int8:18 => transactiondetails.cardid:5 │ ├── int8:19 => quantity:6 - │ ├── sellprice:24 => transactiondetails.sellprice:7 - │ ├── buyprice:25 => transactiondetails.buyprice:8 + │ ├── numeric:20 => sellprice:7 + │ ├── numeric:21 => buyprice:8 │ ├── version_default:22 => transactiondetails.version:9 - │ └── discount_default:26 => discount:10 + │ └── discount_cast:24 => discount:10 ├── update-mapping: - │ ├── sellprice:24 => transactiondetails.sellprice:7 - │ ├── buyprice:25 => transactiondetails.buyprice:8 - │ └── discount_default:26 => discount:10 + │ ├── numeric:20 => sellprice:7 + │ ├── numeric:21 => buyprice:8 + │ └── discount_cast:24 => discount:10 ├── input binding: &2 ├── cardinality: [0 - 0] ├── volatile, mutations ├── project - │ ├── columns: upsert_dealerid:39 upsert_isbuy:40 upsert_transactiondate:41 upsert_cardid:42 "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null version_default:22 sellprice:24 buyprice:25 discount_default:26!null transactiondetails.dealerid:27 transactiondetails.isbuy:28 transactiondetails.transactiondate:29 transactiondetails.cardid:30 quantity:31 transactiondetails.sellprice:32 transactiondetails.buyprice:33 transactiondetails.version:34 discount:35 transactiondetails.extra:36 + │ ├── columns: upsert_dealerid:37 upsert_isbuy:38 upsert_transactiondate:39 upsert_cardid:40 "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null numeric:20!null numeric:21!null version_default:22 discount_cast:24!null transactiondetails.dealerid:25 transactiondetails.isbuy:26 transactiondetails.transactiondate:27 transactiondetails.cardid:28 
quantity:29 sellprice:30 buyprice:31 transactiondetails.version:32 discount:33 transactiondetails.extra:34 │ ├── cardinality: [1 - 2] │ ├── volatile │ ├── key: (18,19) - │ ├── fd: ()-->(15-17,26), (18,19)-->(22,24,25,27-36), (27-31)-->(32-36), (27)-->(39), (27,28)-->(40), (27,29)-->(41), (18,27,30)-->(42) + │ ├── fd: ()-->(15-17,24), (18,19)-->(20-22,25-34), (25-29)-->(30-34), (25)-->(37), (25,26)-->(38), (25,27)-->(39), (18,25,28)-->(40) │ ├── left-join (lookup transactiondetails) - │ │ ├── columns: "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null version_default:22 sellprice:24 buyprice:25 discount_default:26!null transactiondetails.dealerid:27 transactiondetails.isbuy:28 transactiondetails.transactiondate:29 transactiondetails.cardid:30 quantity:31 transactiondetails.sellprice:32 transactiondetails.buyprice:33 transactiondetails.version:34 discount:35 transactiondetails.extra:36 - │ │ ├── key columns: [15 16 17 18 19] = [27 28 29 30 31] + │ │ ├── columns: "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null numeric:20!null numeric:21!null version_default:22 discount_cast:24!null transactiondetails.dealerid:25 transactiondetails.isbuy:26 transactiondetails.transactiondate:27 transactiondetails.cardid:28 quantity:29 sellprice:30 buyprice:31 transactiondetails.version:32 discount:33 transactiondetails.extra:34 + │ │ ├── key columns: [15 16 17 18 19] = [25 26 27 28 29] │ │ ├── lookup columns are key │ │ ├── cardinality: [1 - 2] │ │ ├── volatile │ │ ├── key: (18,19) - │ │ ├── fd: ()-->(15-17,26), (18,19)-->(22,24,25,27-36), (27-31)-->(32-36) + │ │ ├── fd: ()-->(15-17,24), (18,19)-->(20-22,25-34), (25-29)-->(30-34) │ │ ├── ensure-upsert-distinct-on - │ │ │ ├── columns: "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null version_default:22 sellprice:24 buyprice:25 discount_default:26!null + │ │ │ ├── columns: "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null numeric:20!null numeric:21!null version_default:22 discount_cast:24!null │ │ │ ├── grouping columns: int8:18!null int8:19!null │ │ │ ├── error: "UPSERT or INSERT...ON CONFLICT command cannot affect row a second time" │ │ │ ├── cardinality: [1 - 2] │ │ │ ├── volatile │ │ │ ├── key: (18,19) - │ │ │ ├── fd: ()-->(15-17,26), (18,19)-->(15-17,22,24-26) + │ │ │ ├── fd: ()-->(15-17,24), (18,19)-->(15-17,20-22,24) │ │ │ ├── project - │ │ │ │ ├── columns: sellprice:24 buyprice:25 discount_default:26!null version_default:22 "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null + │ │ │ │ ├── columns: discount_cast:24!null version_default:22 "?column?":15!null bool:16!null current_timestamp:17!null int8:18!null int8:19!null numeric:20!null numeric:21!null │ │ │ │ ├── cardinality: [2 - 2] │ │ │ │ ├── volatile - │ │ │ │ ├── fd: ()-->(15-17,26) + │ │ │ │ ├── fd: ()-->(15-17,24) │ │ │ │ ├── values - │ │ │ │ │ ├── columns: detail_b:68!null detail_c:69!null detail_q:70!null detail_s:71!null + │ │ │ │ │ ├── columns: detail_b:66!null detail_c:67!null detail_q:68!null detail_s:69!null │ │ │ │ │ ├── cardinality: [2 - 2] │ │ │ │ │ ├── ('2.29', '49833', '4', '2.89') │ │ │ │ │ └── ('17.59', '29483', '2', '18.93') │ │ │ │ └── projections - │ │ │ │ ├── crdb_internal.round_decimal_values(detail_s:71::STRING::DECIMAL(10,4), 4) [as=sellprice:24, outer=(71), immutable] - │ │ │ │ ├── crdb_internal.round_decimal_values(detail_b:68::STRING::DECIMAL(10,4), 4) [as=buyprice:25, outer=(68), immutable] - │ │ │ │ ├── 
0.0000 [as=discount_default:26] + │ │ │ │ ├── 0.0000 [as=discount_cast:24] │ │ │ │ ├── cluster_logical_timestamp() [as=version_default:22, volatile] │ │ │ │ ├── 1 [as="?column?":15] │ │ │ │ ├── false [as=bool:16] │ │ │ │ ├── '2017-05-10 13:00:00+00:00' [as=current_timestamp:17] - │ │ │ │ ├── detail_c:69::STRING::INT8 [as=int8:18, outer=(69), immutable] - │ │ │ │ └── detail_q:70::STRING::INT8 [as=int8:19, outer=(70), immutable] + │ │ │ │ ├── detail_c:67::STRING::INT8 [as=int8:18, outer=(67), immutable] + │ │ │ │ ├── detail_q:68::STRING::INT8 [as=int8:19, outer=(68), immutable] + │ │ │ │ ├── detail_s:69::STRING::DECIMAL(10,4) [as=numeric:20, outer=(69), immutable] + │ │ │ │ └── detail_b:66::STRING::DECIMAL(10,4) [as=numeric:21, outer=(66), immutable] │ │ │ └── aggregations - │ │ │ ├── first-agg [as=sellprice:24, outer=(24)] - │ │ │ │ └── sellprice:24 - │ │ │ ├── first-agg [as=buyprice:25, outer=(25)] - │ │ │ │ └── buyprice:25 + │ │ │ ├── first-agg [as=numeric:20, outer=(20)] + │ │ │ │ └── numeric:20 + │ │ │ ├── first-agg [as=numeric:21, outer=(21)] + │ │ │ │ └── numeric:21 │ │ │ ├── first-agg [as=version_default:22, outer=(22)] │ │ │ │ └── version_default:22 - │ │ │ ├── first-agg [as=discount_default:26, outer=(26)] - │ │ │ │ └── discount_default:26 + │ │ │ ├── first-agg [as=discount_cast:24, outer=(24)] + │ │ │ │ └── discount_cast:24 │ │ │ ├── const-agg [as="?column?":15, outer=(15)] │ │ │ │ └── "?column?":15 │ │ │ ├── const-agg [as=bool:16, outer=(16)] @@ -1408,35 +1408,35 @@ upsert transactiondetails │ │ │ └── current_timestamp:17 │ │ └── filters (true) │ └── projections - │ ├── CASE WHEN transactiondetails.dealerid:27 IS NULL THEN "?column?":15 ELSE transactiondetails.dealerid:27 END [as=upsert_dealerid:39, outer=(15,27)] - │ ├── CASE WHEN transactiondetails.dealerid:27 IS NULL THEN bool:16 ELSE transactiondetails.isbuy:28 END [as=upsert_isbuy:40, outer=(16,27,28)] - │ ├── CASE WHEN transactiondetails.dealerid:27 IS NULL THEN current_timestamp:17 ELSE transactiondetails.transactiondate:29 END [as=upsert_transactiondate:41, outer=(17,27,29)] - │ └── CASE WHEN transactiondetails.dealerid:27 IS NULL THEN int8:18 ELSE transactiondetails.cardid:30 END [as=upsert_cardid:42, outer=(18,27,30)] + │ ├── CASE WHEN transactiondetails.dealerid:25 IS NULL THEN "?column?":15 ELSE transactiondetails.dealerid:25 END [as=upsert_dealerid:37, outer=(15,25)] + │ ├── CASE WHEN transactiondetails.dealerid:25 IS NULL THEN bool:16 ELSE transactiondetails.isbuy:26 END [as=upsert_isbuy:38, outer=(16,25,26)] + │ ├── CASE WHEN transactiondetails.dealerid:25 IS NULL THEN current_timestamp:17 ELSE transactiondetails.transactiondate:27 END [as=upsert_transactiondate:39, outer=(17,25,27)] + │ └── CASE WHEN transactiondetails.dealerid:25 IS NULL THEN int8:18 ELSE transactiondetails.cardid:28 END [as=upsert_cardid:40, outer=(18,25,28)] └── f-k-checks ├── f-k-checks-item: transactiondetails(dealerid,isbuy,transactiondate) -> transactions(dealerid,isbuy,date) │ └── anti-join (lookup transactions) - │ ├── columns: dealerid:45 isbuy:46 transactiondate:47 - │ ├── key columns: [45 46 47] = [48 49 50] + │ ├── columns: dealerid:43 isbuy:44 transactiondate:45 + │ ├── key columns: [43 44 45] = [46 47 48] │ ├── lookup columns are key │ ├── cardinality: [0 - 2] │ ├── with-scan &2 - │ │ ├── columns: dealerid:45 isbuy:46 transactiondate:47 + │ │ ├── columns: dealerid:43 isbuy:44 transactiondate:45 │ │ ├── mapping: - │ │ │ ├── upsert_dealerid:39 => dealerid:45 - │ │ │ ├── upsert_isbuy:40 => isbuy:46 - │ │ │ └── upsert_transactiondate:41 
=> transactiondate:47 + │ │ │ ├── upsert_dealerid:37 => dealerid:43 + │ │ │ ├── upsert_isbuy:38 => isbuy:44 + │ │ │ └── upsert_transactiondate:39 => transactiondate:45 │ │ └── cardinality: [1 - 2] │ └── filters (true) └── f-k-checks-item: transactiondetails(cardid) -> cards(id) └── anti-join (lookup cards) - ├── columns: cardid:59 - ├── key columns: [59] = [60] + ├── columns: cardid:57 + ├── key columns: [57] = [58] ├── lookup columns are key ├── cardinality: [0 - 2] ├── with-scan &2 - │ ├── columns: cardid:59 + │ ├── columns: cardid:57 │ ├── mapping: - │ │ └── upsert_cardid:42 => cardid:59 + │ │ └── upsert_cardid:40 => cardid:57 │ └── cardinality: [1 - 2] └── filters (true) diff --git a/pkg/sql/opt/xform/testdata/physprops/distribution b/pkg/sql/opt/xform/testdata/physprops/distribution new file mode 100644 index 000000000000..258044d744ac --- /dev/null +++ b/pkg/sql/opt/xform/testdata/physprops/distribution @@ -0,0 +1,271 @@ +# Tests for distribution property with a non-partitioned table. + +exec-ddl +CREATE TABLE abc ( + a INT PRIMARY KEY, + b INT, + c STRING, + UNIQUE INDEX bc (b, c) +) +---- + +exec-ddl +ALTER TABLE abc CONFIGURE ZONE USING constraints='[+region=us,+dc=central,+rack=1]' +---- + +exec-ddl +ALTER INDEX abc@bc CONFIGURE ZONE USING constraints='[+region=us,-region=eu,+region=ap]' +---- + +opt locality=(region=us) +SELECT a FROM abc +---- +scan abc + ├── columns: a:1!null + ├── key: (1) + └── distribution: us + +opt locality=(region=eu) +SELECT a FROM abc +---- +distribute + ├── columns: a:1!null + ├── key: (1) + ├── distribution: eu + ├── input distribution: us + └── scan abc + ├── columns: a:1!null + └── key: (1) + +opt locality=(region=us) +SELECT a FROM abc WHERE b = 1 +---- +distribute + ├── columns: a:1!null + ├── key: (1) + ├── distribution: us + ├── input distribution: ap,us + └── project + ├── columns: a:1!null + ├── key: (1) + └── scan abc@bc + ├── columns: a:1!null b:2!null + ├── constraint: /2/3: [/1 - /1] + ├── key: (1) + └── fd: ()-->(2) + +opt locality=(region=us) +SELECT * FROM abc WHERE a > 10 +---- +scan abc + ├── columns: a:1!null b:2 c:3 + ├── constraint: /1: [/11 - ] + ├── key: (1) + ├── fd: (1)-->(2,3), (2,3)~~>(1) + └── distribution: us + +opt locality=(region=ap) +SELECT * FROM abc WHERE a > 10 +---- +distribute + ├── columns: a:1!null b:2 c:3 + ├── key: (1) + ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── distribution: ap + ├── input distribution: us + └── scan abc + ├── columns: a:1!null b:2 c:3 + ├── constraint: /1: [/11 - ] + ├── key: (1) + └── fd: (1)-->(2,3), (2,3)~~>(1) + +# Combined with sorting. +opt locality=(region=ap) +SELECT * FROM abc WHERE a > 10 ORDER BY c +---- +distribute + ├── columns: a:1!null b:2 c:3 + ├── key: (1) + ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── ordering: +3 + ├── distribution: ap + ├── input distribution: us + └── sort + ├── columns: a:1!null b:2 c:3 + ├── key: (1) + ├── fd: (1)-->(2,3), (2,3)~~>(1) + ├── ordering: +3 + └── scan abc + ├── columns: a:1!null b:2 c:3 + ├── constraint: /1: [/11 - ] + ├── key: (1) + └── fd: (1)-->(2,3), (2,3)~~>(1) + + +# Tests for distribution property with a partitioned table. 
+ +exec-ddl +CREATE TABLE abc_part ( + r STRING NOT NULL CHECK (r IN ('east', 'west', 'central')), + t INT NOT NULL CHECK (t IN (1, 2, 3)), + a INT PRIMARY KEY, + b INT, + c INT, + d INT, + UNIQUE WITHOUT INDEX (c), + UNIQUE INDEX c_idx (r, t, c) PARTITION BY LIST (r, t) ( + PARTITION east VALUES IN (('east', 1), ('east', 2)), + PARTITION west VALUES IN (('west', DEFAULT)), + PARTITION default VALUES IN (DEFAULT) + ), + INDEX d_idx (r, d) PARTITION BY LIST (r) ( + PARTITION east VALUES IN (('east')), + PARTITION west VALUES IN (('west')), + PARTITION central VALUES IN (('central')) + ) +) +---- + +exec-ddl +ALTER PARTITION "east" OF INDEX abc_part@c_idx CONFIGURE ZONE USING + num_voters = 5, + voter_constraints = '{+region=east: 2}', + lease_preferences = '[[+region=east]]' +---- + +exec-ddl +ALTER PARTITION "west" OF INDEX abc_part@c_idx CONFIGURE ZONE USING + num_voters = 5, + voter_constraints = '{+region=west: 2}', + lease_preferences = '[[+region=west]]' +---- + +exec-ddl +ALTER PARTITION "default" OF INDEX abc_part@c_idx CONFIGURE ZONE USING + num_voters = 5, + lease_preferences = '[[+region=central]]'; +---- + +exec-ddl +ALTER PARTITION "east" OF INDEX abc_part@d_idx CONFIGURE ZONE USING + num_voters = 5, + voter_constraints = '{+region=east: 2}', + lease_preferences = '[[+region=east]]' +---- + +exec-ddl +ALTER PARTITION "west" OF INDEX abc_part@d_idx CONFIGURE ZONE USING + num_voters = 5, + voter_constraints = '{+region=west: 2}', + lease_preferences = '[[+region=west]]'; +---- + +exec-ddl +ALTER PARTITION "central" OF INDEX abc_part@d_idx CONFIGURE ZONE USING + num_voters = 5, + voter_constraints = '{+region=central: 2}', + lease_preferences = '[[+region=central]]'; +---- + +opt locality=(region=east) +SELECT a FROM abc_part WHERE r = 'east' AND t = 1 +---- +project + ├── columns: a:3!null + ├── key: (3) + ├── distribution: east + └── scan abc_part@c_idx + ├── columns: r:1!null t:2!null a:3!null + ├── constraint: /1/2/5: [/'east'/1 - /'east'/1] + ├── key: (3) + ├── fd: ()-->(1,2) + └── distribution: east + +opt locality=(region=west) +SELECT a FROM abc_part WHERE r = 'east' AND t = 1 +---- +distribute + ├── columns: a:3!null + ├── key: (3) + ├── distribution: west + ├── input distribution: east + └── project + ├── columns: a:3!null + ├── key: (3) + └── scan abc_part@c_idx + ├── columns: r:1!null t:2!null a:3!null + ├── constraint: /1/2/5: [/'east'/1 - /'east'/1] + ├── key: (3) + └── fd: ()-->(1,2) + +opt locality=(region=west) +SELECT a FROM abc_part WHERE r IN ('east', 'west') AND d = 10 +---- +distribute + ├── columns: a:3!null + ├── key: (3) + ├── distribution: west + ├── input distribution: east,west + └── project + ├── columns: a:3!null + ├── key: (3) + └── scan abc_part@d_idx + ├── columns: r:1!null a:3!null d:6!null + ├── constraint: /1/6/3 + │ ├── [/'east'/10 - /'east'/10] + │ └── [/'west'/10 - /'west'/10] + ├── key: (3) + └── fd: ()-->(6), (3)-->(1) + +opt locality=(region=east) +SELECT a FROM abc_part WHERE d = 10 +---- +distribute + ├── columns: a:3!null + ├── key: (3) + ├── distribution: east + ├── input distribution: central,east,west + └── project + ├── columns: a:3!null + ├── key: (3) + └── scan abc_part@d_idx + ├── columns: a:3!null d:6!null + ├── constraint: /1/6/3 + │ ├── [/'central'/10 - /'central'/10] + │ ├── [/'east'/10 - /'east'/10] + │ └── [/'west'/10 - /'west'/10] + ├── key: (3) + └── fd: ()-->(6) + +# Combined with sorting. 
+opt locality=(region=east) +SELECT a FROM abc_part WHERE d = 10 ORDER BY c +---- +distribute + ├── columns: a:3!null [hidden: c:5] + ├── key: (3) + ├── fd: (3)-->(5), (5)~~>(3) + ├── ordering: +5 + ├── distribution: east + ├── input distribution: central,east,west + └── sort + ├── columns: a:3!null c:5 + ├── key: (3) + ├── fd: (3)-->(5), (5)~~>(3) + ├── ordering: +5 + └── project + ├── columns: a:3!null c:5 + ├── key: (3) + ├── fd: (3)-->(5), (5)~~>(3) + └── index-join abc_part + ├── columns: a:3!null c:5 d:6!null + ├── key: (3) + ├── fd: ()-->(6), (3)-->(5), (5)~~>(3) + └── scan abc_part@d_idx + ├── columns: a:3!null d:6!null + ├── constraint: /1/6/3 + │ ├── [/'central'/10 - /'central'/10] + │ ├── [/'east'/10 - /'east'/10] + │ └── [/'west'/10 - /'west'/10] + ├── key: (3) + └── fd: ()-->(6) diff --git a/pkg/sql/opt/xform/testdata/physprops/ordering b/pkg/sql/opt/xform/testdata/physprops/ordering index baeeeeb2a3cb..98678c57e8ee 100644 --- a/pkg/sql/opt/xform/testdata/physprops/ordering +++ b/pkg/sql/opt/xform/testdata/physprops/ordering @@ -2398,7 +2398,7 @@ distinct-on ├── project │ ├── columns: "lookup_join_const_col_@9":14!null a:1!null b:5!null │ ├── fd: ()-->(14), (1)==(5), (5)==(1) - │ ├── ordering: +(1|5) [actual: +1] + │ ├── ordering: +(1|5) opt(14) [actual: +1] │ ├── inner-join (lookup t44469_b@t44469_b_b_idx) │ │ ├── columns: a:1!null b:5!null │ │ ├── flags: force lookup join (into right side) @@ -2448,3 +2448,113 @@ project │ └── k:1 > 0 [outer=(1)] └── projections └── 1 [as="?column?":6] + +# Regression test for #73968. Lookup join needs to simplify the ordering +# required of its child. +exec-ddl +CREATE TABLE t73968 ( + k INT PRIMARY KEY, + name STRING, + x STRING AS (CAST(k AS STRING)) VIRTUAL, + y STRING AS (lower(name)) VIRTUAL, + UNIQUE (y), + UNIQUE (x) +); +---- + +exec-ddl +ALTER TABLE t73968 INJECT STATISTICS e'[ + { + "columns": ["k"], + "created_at": "2000-01-01 00:00:00+00:00", + "distinct_count": 1000000, + "name": "__auto__", + "null_count": 0, + "row_count": 1000000000 + } +]'; +---- + +opt +SELECT + t2.crdb_internal_mvcc_timestamp +FROM + t73968 AS t1 JOIN t73968 AS t2 ON t1.name = t2.name + AND t1.y = t2.y + AND t1.x = t2.y + AND t1.k = t2.k + AND t1.x = t2.x +ORDER BY + t2.x, t2.k +LIMIT + 56 +---- +project + ├── columns: crdb_internal_mvcc_timestamp:11 [hidden: k:7!null x:9!null] + ├── cardinality: [0 - 56] + ├── immutable + ├── key: (7) + ├── fd: (7)-->(9,11) + ├── ordering: +9,+7 [actual: +9] + └── limit + ├── columns: k:1!null name:2!null x:3!null k:7!null name:8!null x:9!null crdb_internal_mvcc_timestamp:11 + ├── internal-ordering: +(3|9),+(1|7) + ├── cardinality: [0 - 56] + ├── immutable + ├── key: (7) + ├── fd: (1)-->(2,3), (7)-->(8,9,11), (2)==(8), (8)==(2), (1)==(7), (7)==(1), (3)==(9), (9)==(3) + ├── ordering: +(3|9),+(1|7) [actual: +3] + ├── inner-join (lookup t73968) + │ ├── columns: k:1!null name:2!null x:3!null k:7!null name:8!null x:9!null crdb_internal_mvcc_timestamp:11 + │ ├── key columns: [7] = [7] + │ ├── lookup columns are key + │ ├── immutable + │ ├── key: (7) + │ ├── fd: (1)-->(2,3), (7)-->(8,9,11), (2)==(8), (8)==(2), (1)==(7), (7)==(1), (3)==(9), (9)==(3) + │ ├── ordering: +(3|9),+(1|7) [actual: +3] + │ ├── limit hint: 56.00 + │ ├── inner-join (lookup t73968@t73968_x_key) + │ │ ├── columns: k:1!null name:2 x:3!null k:7!null x:9!null + │ │ ├── key columns: [3] = [9] + │ │ ├── lookup columns are key + │ │ ├── immutable + │ │ ├── key: (7) + │ │ ├── fd: (1)-->(2,3), (7)-->(9), (9)-->(7), (1)==(7), (7)==(1), (3)==(9), (9)==(3) + │ │ ├── 
ordering: +(3|9) [actual: +3] + │ │ ├── limit hint: 200.00 + │ │ ├── sort + │ │ │ ├── columns: k:1!null name:2 x:3!null + │ │ │ ├── immutable + │ │ │ ├── key: (1) + │ │ │ ├── fd: (1)-->(2,3) + │ │ │ ├── ordering: +3 + │ │ │ ├── limit hint: 2100.00 + │ │ │ └── project + │ │ │ ├── columns: x:3!null k:1!null name:2 + │ │ │ ├── immutable + │ │ │ ├── key: (1) + │ │ │ ├── fd: (1)-->(2,3) + │ │ │ ├── select + │ │ │ │ ├── columns: k:1!null name:2 + │ │ │ │ ├── immutable + │ │ │ │ ├── key: (1) + │ │ │ │ ├── fd: (1)-->(2) + │ │ │ │ ├── scan t73968 + │ │ │ │ │ ├── columns: k:1!null name:2 + │ │ │ │ │ ├── computed column expressions + │ │ │ │ │ │ ├── x:3 + │ │ │ │ │ │ │ └── k:1::STRING + │ │ │ │ │ │ └── y:4 + │ │ │ │ │ │ └── lower(name:2) + │ │ │ │ │ ├── key: (1) + │ │ │ │ │ └── fd: (1)-->(2) + │ │ │ │ └── filters + │ │ │ │ └── k:1::STRING = lower(name:2) [outer=(1,2), immutable] + │ │ │ └── projections + │ │ │ └── k:1::STRING [as=x:3, outer=(1), immutable] + │ │ └── filters + │ │ └── k:1 = k:7 [outer=(1,7), fd=(1)==(7), (7)==(1)] + │ └── filters + │ ├── name:2 = name:8 [outer=(2,8), fd=(2)==(8), (8)==(2)] + │ └── k:7::STRING = lower(name:8) [outer=(7,8), immutable] + └── 56 diff --git a/pkg/sql/opt/xform/testdata/rules/groupby b/pkg/sql/opt/xform/testdata/rules/groupby index d81b9808ffb1..82c631f19b24 100644 --- a/pkg/sql/opt/xform/testdata/rules/groupby +++ b/pkg/sql/opt/xform/testdata/rules/groupby @@ -2172,7 +2172,7 @@ memo (optimized, ~5KB, required=[presentation: u:2,v:3,w:4] [ordering: +4]) memo SELECT (SELECT w FROM kuvw WHERE v=1 AND x=u) FROM xyz ORDER BY x+1, x ---- -memo (optimized, ~32KB, required=[presentation: w:12] [ordering: +13,+1]) +memo (optimized, ~31KB, required=[presentation: w:12] [ordering: +13,+1]) ├── G1: (project G2 G3 x) │ ├── [presentation: w:12] [ordering: +13,+1] │ │ ├── best: (sort G1) @@ -2212,14 +2212,14 @@ memo (optimized, ~32KB, required=[presentation: w:12] [ordering: +13,+1]) ├── G10: (filters G19) ├── G11: (filters) ├── G12: (project G8 G20 x) - │ ├── [ordering: +1] + │ ├── [ordering: +1 opt(14)] │ │ ├── best: (project G8="[ordering: +1]" G20 x) │ │ └── cost: 1074.34 │ └── [] │ ├── best: (project G8 G20 x) │ └── cost: 1074.34 ├── G13: (project G8 G20 x) - │ ├── [ordering: +1] + │ ├── [ordering: +1 opt(15)] │ │ ├── best: (project G8="[ordering: +1]" G20 x) │ │ └── cost: 1074.34 │ └── [] @@ -2246,7 +2246,7 @@ memo (optimized, ~32KB, required=[presentation: w:12] [ordering: +13,+1]) memo INSERT INTO xyz SELECT v, w, 1.0 FROM kuvw ON CONFLICT (x) DO NOTHING ---- -memo (optimized, ~25KB, required=[]) +memo (optimized, ~24KB, required=[]) ├── G1: (insert G2 G3 G4 xyz) │ └── [] │ ├── best: (insert G2 G3 G4 xyz) diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join index 5c8abe2fb3f3..7861a0a57767 100644 --- a/pkg/sql/opt/xform/testdata/rules/join +++ b/pkg/sql/opt/xform/testdata/rules/join @@ -182,7 +182,7 @@ inner-join (merge) memo expect=ReorderJoins SELECT * FROM abc, stu, xyz WHERE abc.a=stu.s AND stu.s=xyz.x ---- -memo (optimized, ~42KB, required=[presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14]) +memo (optimized, ~41KB, required=[presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14]) ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) (inner-join G5 G6 G7) (inner-join G6 G5 G7) (inner-join G8 G9 G7) (inner-join G9 G8 G7) (merge-join G2 G3 G10 inner-join,+1,+7) (merge-join G3 G2 G10 inner-join,+7,+1) (lookup-join G3 G10 abc@ab,keyCols=[7],outCols=(1-3,7-9,12-14)) (merge-join G5 G6 G10 inner-join,+7,+12) (merge-join G6 G5 
G10 inner-join,+12,+7) (lookup-join G6 G10 stu,keyCols=[12],outCols=(1-3,7-9,12-14)) (merge-join G8 G9 G10 inner-join,+7,+12) (lookup-join G8 G10 xyz@xy,keyCols=[7],outCols=(1-3,7-9,12-14)) (merge-join G9 G8 G10 inner-join,+12,+7) │ └── [presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14] │ ├── best: (merge-join G5="[ordering: +7]" G6="[ordering: +(1|12)]" G10 inner-join,+7,+12) @@ -244,7 +244,7 @@ memo (optimized, ~42KB, required=[presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:1 memo SELECT * FROM abc, stu, xyz, pqr WHERE a = 1 ---- -memo (optimized, ~28KB, required=[presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14,p:18,q:19,r:20,s:21,t:22]) +memo (optimized, ~27KB, required=[presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14,p:18,q:19,r:20,s:21,t:22]) ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) │ └── [presentation: a:1,b:2,c:3,s:7,t:8,u:9,x:12,y:13,z:14,p:18,q:19,r:20,s:21,t:22] │ ├── best: (inner-join G3 G2 G4) @@ -1823,7 +1823,7 @@ inner-join (lookup xyz@xy) memo SELECT * FROM stu AS l JOIN stu AS r ON (l.s, l.t, l.u) = (r.s, r.t, r.u) ---- -memo (optimized, ~18KB, required=[presentation: s:1,t:2,u:3,s:6,t:7,u:8]) +memo (optimized, ~17KB, required=[presentation: s:1,t:2,u:3,s:6,t:7,u:8]) ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) (merge-join G2 G3 G5 inner-join,+1,+2,+3,+6,+7,+8) (merge-join G2 G3 G5 inner-join,+3,+2,+1,+8,+7,+6) (lookup-join G2 G5 stu [as=r],keyCols=[1 2 3],outCols=(1-3,6-8)) (lookup-join G2 G5 stu@uts [as=r],keyCols=[3 2 1],outCols=(1-3,6-8)) (merge-join G3 G2 G5 inner-join,+6,+7,+8,+1,+2,+3) (merge-join G3 G2 G5 inner-join,+8,+7,+6,+3,+2,+1) (lookup-join G3 G5 stu [as=l],keyCols=[6 7 8],outCols=(1-3,6-8)) (lookup-join G3 G5 stu@uts [as=l],keyCols=[8 7 6],outCols=(1-3,6-8)) │ └── [presentation: s:1,t:2,u:3,s:6,t:7,u:8] │ ├── best: (merge-join G2="[ordering: +1,+2,+3]" G3="[ordering: +6,+7,+8]" G5 inner-join,+1,+2,+3,+6,+7,+8) @@ -2030,7 +2030,7 @@ left-join (merge) memo SELECT * FROM abc JOIN xyz ON a=b ---- -memo (optimized, ~16KB, required=[presentation: a:1,b:2,c:3,x:7,y:8,z:9]) +memo (optimized, ~15KB, required=[presentation: a:1,b:2,c:3,x:7,y:8,z:9]) ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) │ └── [presentation: a:1,b:2,c:3,x:7,y:8,z:9] │ ├── best: (inner-join G3 G2 G4) @@ -5346,7 +5346,7 @@ WHERE n.name = 'Upper West Side' OR n.name = 'Upper East Side' GROUP BY n.name, n.geom ---- -memo (optimized, ~33KB, required=[presentation: name:16,popn_per_sqkm:22]) +memo (optimized, ~32KB, required=[presentation: name:16,popn_per_sqkm:22]) ├── G1: (project G2 G3 name) │ └── [presentation: name:16,popn_per_sqkm:22] │ ├── best: (project G2 G3 name) @@ -8205,7 +8205,7 @@ SELECT 1 FROM (VALUES (1), (1)) JOIN (VALUES (1), (1), (1)) ON true UNION ALL SELECT 1 FROM (VALUES (1), (1), (1)) JOIN (VALUES (1), (1)) ON true ---- -memo (optimized, ~21KB, required=[presentation: ?column?:7]) +memo (optimized, ~20KB, required=[presentation: ?column?:7]) ├── G1: (union-all G2 G3) │ └── [presentation: ?column?:7] │ ├── best: (union-all G2 G3) @@ -8851,6 +8851,7 @@ anti-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -8861,6 +8862,7 @@ anti-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -8868,6 
+8870,7 @@ anti-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/1 - /'east'/1] @@ -8899,6 +8902,7 @@ anti-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: west ├── anti-join (lookup abc_part) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -8909,6 +8913,7 @@ anti-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: west │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -8916,6 +8921,7 @@ anti-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: west │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'west'/1 - /'west'/1] @@ -8947,6 +8953,7 @@ anti-join (lookup abc_part@b_idx) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part@b_idx) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -8957,6 +8964,7 @@ anti-join (lookup abc_part@b_idx) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -8964,6 +8972,7 @@ anti-join (lookup abc_part@b_idx) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -8995,6 +9004,7 @@ anti-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -9005,6 +9015,7 @@ anti-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9012,6 +9023,7 @@ anti-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/1 - /'east'/1] @@ -9035,38 +9047,44 @@ anti-join (lookup abc_part) opt locality=(region=east) expect=GenerateLocalityOptimizedAntiJoin SELECT * FROM def_part WHERE NOT EXISTS (SELECT * FROM abc_part WHERE e = a) AND f = 10 ---- -anti-join (lookup abc_part) +distribute ├── columns: r:1!null d:2!null e:3 f:4!null - ├── lookup expression - │ └── filters - │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] - │ └── abc_part.r:7 IN ('central', 'west') [outer=(7), constraints=(/7: [/'central' - /'central'] [/'west' - /'west']; tight)] - ├── lookup columns are key ├── key: (2) ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) - ├── anti-join (lookup abc_part) - │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null - │ ├── lookup expression - │ │ └── filters - │ │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), 
(8)==(3)] - │ │ └── abc_part.r:7 = 'east' [outer=(7), constraints=(/7: [/'east' - /'east']; tight), fd=()-->(7)] - │ ├── lookup columns are key - │ ├── key: (2) - │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) - │ ├── index-join def_part - │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null - │ │ ├── key: (2) - │ │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) - │ │ └── scan def_part@f_idx - │ │ ├── columns: def_part.r:1!null d:2!null f:4!null - │ │ ├── constraint: /1/4/2 - │ │ │ ├── [/'central'/10 - /'central'/10] - │ │ │ ├── [/'east'/10 - /'east'/10] - │ │ │ └── [/'west'/10 - /'west'/10] - │ │ ├── key: (2) - │ │ └── fd: ()-->(4), (2)-->(1) - │ └── filters (true) - └── filters (true) + ├── distribution: east + ├── input distribution: central,east,west + └── anti-join (lookup abc_part) + ├── columns: def_part.r:1!null d:2!null e:3 f:4!null + ├── lookup expression + │ └── filters + │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] + │ └── abc_part.r:7 IN ('central', 'west') [outer=(7), constraints=(/7: [/'central' - /'central'] [/'west' - /'west']; tight)] + ├── lookup columns are key + ├── key: (2) + ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) + ├── anti-join (lookup abc_part) + │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null + │ ├── lookup expression + │ │ └── filters + │ │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] + │ │ └── abc_part.r:7 = 'east' [outer=(7), constraints=(/7: [/'east' - /'east']; tight), fd=()-->(7)] + │ ├── lookup columns are key + │ ├── key: (2) + │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) + │ ├── index-join def_part + │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null + │ │ ├── key: (2) + │ │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) + │ │ └── scan def_part@f_idx + │ │ ├── columns: def_part.r:1!null d:2!null f:4!null + │ │ ├── constraint: /1/4/2 + │ │ │ ├── [/'central'/10 - /'central'/10] + │ │ │ ├── [/'east'/10 - /'east'/10] + │ │ │ └── [/'west'/10 - /'west'/10] + │ │ ├── key: (2) + │ │ └── fd: ()-->(4), (2)-->(1) + │ └── filters (true) + └── filters (true) # Optimization applies even though the lookup join may have more than one # matching row. 
@@ -9082,6 +9100,7 @@ anti-join (lookup abc_part@c_idx) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part@c_idx) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -9091,6 +9110,7 @@ anti-join (lookup abc_part@c_idx) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9098,6 +9118,7 @@ anti-join (lookup abc_part@c_idx) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9130,6 +9151,7 @@ anti-join (lookup abc_part@c_idx) ├── immutable ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part@c_idx) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -9140,6 +9162,7 @@ anti-join (lookup abc_part@c_idx) │ ├── immutable │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9147,6 +9170,7 @@ anti-join (lookup abc_part@c_idx) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9184,6 +9208,7 @@ semi-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: central ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9191,6 +9216,7 @@ semi-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: central │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'central'/1 - /'central'/1] @@ -9229,6 +9255,7 @@ inner-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4,7-10), (8)==(3), (3)==(8) + ├── distribution: east ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9236,6 +9263,7 @@ inner-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'east'/1 - /'east'/1] @@ -9270,6 +9298,7 @@ left-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4,7-10) + ├── distribution: west ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9277,6 +9306,7 @@ left-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: west │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'west'/1 - /'west'/1] @@ -9311,6 +9341,7 @@ semi-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: central ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9318,6 +9349,7 @@ semi-join (lookup abc_part) │ 
├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: central │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'central'/1 - /'central'/1] @@ -9352,6 +9384,7 @@ semi-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: west ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9359,6 +9392,7 @@ semi-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: west │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'west'/1 - /'west'/1] @@ -9387,6 +9421,7 @@ inner-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4,7-10), (9)==(4), (4)==(9) + ├── distribution: east ├── inner-join (lookup abc_part@b_idx) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null abc_part.r:7!null a:8!null b:9!null │ ├── lookup expression @@ -9401,6 +9436,7 @@ inner-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4,7-9), (9)==(4), (4)==(9) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9408,6 +9444,7 @@ inner-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9443,6 +9480,7 @@ inner-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4,7-10), (8)==(3), (3)==(8) + ├── distribution: east ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9450,6 +9488,7 @@ inner-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'east'/1 - /'east'/1] @@ -9471,32 +9510,38 @@ inner-join (lookup abc_part) opt locality=(region=east) expect=GenerateLocalityOptimizedLookupJoin SELECT * FROM def_part INNER JOIN abc_part ON e = a WHERE f = 10 ---- -inner-join (lookup abc_part) +distribute ├── columns: r:1!null d:2!null e:3!null f:4!null r:7!null a:8!null b:9 c:10 - ├── lookup expression - │ └── filters - │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] - │ └── abc_part.r:7 = 'east' [outer=(7), constraints=(/7: [/'east' - /'east']; tight), fd=()-->(7)] - ├── remote lookup expression - │ └── filters - │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] - │ └── abc_part.r:7 IN ('central', 'west') [outer=(7), constraints=(/7: [/'central' - /'central'] [/'west' - /'west']; tight)] - ├── lookup columns are key ├── key: (2) ├── fd: ()-->(4), (2)-->(1,3), (3)-->(1,2), (8)-->(7,9,10), (9)~~>(7,8,10), (3)==(8), (8)==(3) - ├── index-join def_part - │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null - │ ├── key: (2) - │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) - │ └── scan def_part@f_idx - │ ├── columns: def_part.r:1!null d:2!null f:4!null - │ ├── constraint: /1/4/2 - │ │ ├── [/'central'/10 - /'central'/10] - │ │ ├── [/'east'/10 - /'east'/10] - │ │ └── [/'west'/10 - /'west'/10] - │ ├── key: (2) - │ 
└── fd: ()-->(4), (2)-->(1) - └── filters (true) + ├── distribution: east + ├── input distribution: central,east,west + └── inner-join (lookup abc_part) + ├── columns: def_part.r:1!null d:2!null e:3!null f:4!null abc_part.r:7!null a:8!null b:9 c:10 + ├── lookup expression + │ └── filters + │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] + │ └── abc_part.r:7 = 'east' [outer=(7), constraints=(/7: [/'east' - /'east']; tight), fd=()-->(7)] + ├── remote lookup expression + │ └── filters + │ ├── e:3 = a:8 [outer=(3,8), constraints=(/3: (/NULL - ]; /8: (/NULL - ]), fd=(3)==(8), (8)==(3)] + │ └── abc_part.r:7 IN ('central', 'west') [outer=(7), constraints=(/7: [/'central' - /'central'] [/'west' - /'west']; tight)] + ├── lookup columns are key + ├── key: (2) + ├── fd: ()-->(4), (2)-->(1,3), (3)-->(1,2), (8)-->(7,9,10), (9)~~>(7,8,10), (3)==(8), (8)==(3) + ├── index-join def_part + │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null + │ ├── key: (2) + │ ├── fd: ()-->(4), (2)-->(1,3), (3)~~>(1,2) + │ └── scan def_part@f_idx + │ ├── columns: def_part.r:1!null d:2!null f:4!null + │ ├── constraint: /1/4/2 + │ │ ├── [/'central'/10 - /'central'/10] + │ │ ├── [/'east'/10 - /'east'/10] + │ │ └── [/'west'/10 - /'west'/10] + │ ├── key: (2) + │ └── fd: ()-->(4), (2)-->(1) + └── filters (true) # Optimization applies for a semi join even though the lookup join may have more # than one matching row. @@ -9516,6 +9561,7 @@ semi-join (lookup abc_part@c_idx) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9523,6 +9569,7 @@ semi-join (lookup abc_part@c_idx) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9554,6 +9601,7 @@ semi-join (lookup abc_part@c_idx) ├── immutable ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── locality-optimized-search │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9561,6 +9609,7 @@ semi-join (lookup abc_part@c_idx) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── scan def_part │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ ├── constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9589,6 +9638,7 @@ inner-join (lookup abc_part) ├── lookup columns are key ├── key: (8) ├── fd: ()-->(1-4,10), (8)-->(7,9), (9)~~>(7,8), (4)==(10), (10)==(4) + ├── distribution: east ├── inner-join (lookup abc_part@c_idx) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4!null abc_part.r:7!null a:8!null c:10!null │ ├── lookup expression @@ -9597,6 +9647,7 @@ inner-join (lookup abc_part) │ │ └── abc_part.r:7 IN ('central', 'east', 'west') [outer=(7), constraints=(/7: [/'central' - /'central'] [/'east' - /'east'] [/'west' - /'west']; tight)] │ ├── key: (8) │ ├── fd: ()-->(1-4,10), (8)-->(7), (4)==(10), (10)==(4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9604,6 +9655,7 @@ inner-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── 
constraint: /13/14: [/'east'/10 - /'east'/10] @@ -9635,6 +9687,7 @@ anti-join (lookup abc_part) ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-4) + ├── distribution: east ├── anti-join (lookup abc_part) │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ ├── lookup expression @@ -9645,6 +9698,7 @@ anti-join (lookup abc_part) │ ├── cardinality: [0 - 1] │ ├── key: () │ ├── fd: ()-->(1-4) + │ ├── distribution: east │ ├── locality-optimized-search │ │ ├── columns: def_part.r:1!null d:2!null e:3 f:4 │ │ ├── left columns: def_part.r:13 d:14 e:15 f:16 @@ -9652,6 +9706,7 @@ anti-join (lookup abc_part) │ │ ├── cardinality: [0 - 1] │ │ ├── key: () │ │ ├── fd: ()-->(1-4) + │ │ ├── distribution: east │ │ ├── scan def_part │ │ │ ├── columns: def_part.r:13!null d:14!null e:15 f:16 │ │ │ ├── constraint: /13/14: [/'east'/1 - /'east'/1] diff --git a/pkg/sql/opt/xform/testdata/rules/join_order b/pkg/sql/opt/xform/testdata/rules/join_order index f219f6ae03ea..9e201c3bb063 100644 --- a/pkg/sql/opt/xform/testdata/rules/join_order +++ b/pkg/sql/opt/xform/testdata/rules/join_order @@ -312,7 +312,7 @@ New expression 3 of 3: memo join-limit=0 expect-not=ReorderJoins SELECT * FROM bx, cy, abc WHERE a = 1 AND abc.b = bx.b AND abc.c = cy.c ---- -memo (optimized, ~28KB, required=[presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12]) +memo (optimized, ~27KB, required=[presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12]) ├── G1: (inner-join G2 G3 G4) (merge-join G2 G3 G5 inner-join,+1,+10) │ └── [presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12] │ ├── best: (merge-join G2="[ordering: +1]" G3 G5 inner-join,+1,+10) @@ -365,7 +365,7 @@ memo (optimized, ~28KB, required=[presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d: memo join-limit=2 SELECT * FROM bx, cy, abc WHERE a = 1 AND abc.b = bx.b AND abc.c = cy.c ---- -memo (optimized, ~45KB, required=[presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12]) +memo (optimized, ~44KB, required=[presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12]) ├── G1: (inner-join G2 G3 G4) (inner-join G3 G2 G4) (inner-join G5 G6 G7) (inner-join G6 G5 G7) (merge-join G2 G3 G8 inner-join,+1,+10) (merge-join G3 G2 G8 inner-join,+10,+1) (lookup-join G3 G8 bx,keyCols=[10],outCols=(1,2,5,6,9-12)) (merge-join G5 G6 G8 inner-join,+5,+11) (merge-join G6 G5 G8 inner-join,+11,+5) (lookup-join G6 G8 cy,keyCols=[11],outCols=(1,2,5,6,9-12)) │ └── [presentation: b:1,x:2,c:5,y:6,a:9,b:10,c:11,d:12] │ ├── best: (lookup-join G3 G8 bx,keyCols=[10],outCols=(1,2,5,6,9-12)) diff --git a/pkg/sql/opt/xform/testdata/rules/scan b/pkg/sql/opt/xform/testdata/rules/scan index 2bc34c48465f..a3efc61852be 100644 --- a/pkg/sql/opt/xform/testdata/rules/scan +++ b/pkg/sql/opt/xform/testdata/rules/scan @@ -47,7 +47,7 @@ scan a,rev memo SELECT k,f FROM a ORDER BY k DESC LIMIT 10 ---- -memo (optimized, ~5KB, required=[presentation: k:1,f:3] [ordering: -1]) +memo (optimized, ~4KB, required=[presentation: k:1,f:3] [ordering: -1]) ├── G1: (limit G2 G3 ordering=-1) (scan a,rev,cols=(1,3),lim=10(rev)) (top-k G2 &{10 -1 }) │ ├── [presentation: k:1,f:3] [ordering: -1] │ │ ├── best: (scan a,rev,cols=(1,3),lim=10(rev)) @@ -526,6 +526,7 @@ project ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3) + ├── distribution: east └── locality-optimized-search ├── columns: a:3!null b:4!null ├── left columns: a:11 b:12 @@ -533,6 +534,7 @@ project ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3,4) + ├── distribution: east ├── scan abc_part@b_idx │ ├── columns: a:11!null b:12!null │ ├── constraint: /9/12: [/'east'/1 - /'east'/1] @@ -556,6 +558,7 @@ 
index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-6) + ├── distribution: west └── locality-optimized-search ├── columns: r:1!null a:3!null b:4!null ├── left columns: r:9 a:11 b:12 @@ -563,6 +566,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1,3,4) + ├── distribution: west ├── scan abc_part@b_idx │ ├── columns: r:9!null a:11!null b:12!null │ ├── constraint: /9/12: [/'west'/1 - /'west'/1] @@ -586,6 +590,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-6) + ├── distribution: central └── locality-optimized-search ├── columns: r:1!null a:3!null b:4!null ├── left columns: r:9 a:11 b:12 @@ -593,6 +598,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1,3,4) + ├── distribution: central ├── scan abc_part@b_idx │ ├── columns: r:9!null a:11!null b:12!null │ ├── constraint: /9/12: [/'central'/1 - /'central'/1] @@ -616,6 +622,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-6) + ├── distribution: east └── locality-optimized-search ├── columns: r:1!null t:2!null a:3!null c:5!null ├── left columns: r:9 t:10 a:11 c:13 @@ -623,6 +630,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-3,5) + ├── distribution: east ├── scan abc_part@c_idx │ ├── columns: r:9!null t:10!null a:11!null c:13!null │ ├── constraint: /9/10/13 @@ -653,6 +661,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-6) + ├── distribution: west └── locality-optimized-search ├── columns: r:1!null t:2!null a:3!null c:5!null ├── left columns: r:9 t:10 a:11 c:13 @@ -660,6 +669,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-3,5) + ├── distribution: west ├── scan abc_part@c_idx │ ├── columns: r:9!null t:10!null a:11!null c:13!null │ ├── constraint: /9/10/13 @@ -690,6 +700,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-6) + ├── distribution: central └── locality-optimized-search ├── columns: r:1!null t:2!null a:3!null c:5!null ├── left columns: r:9 t:10 a:11 c:13 @@ -697,6 +708,7 @@ index-join abc_part ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(1-3,5) + ├── distribution: central ├── scan abc_part@c_idx │ ├── columns: r:9!null t:10!null a:11!null c:13!null │ ├── constraint: /9/10/13 @@ -727,6 +739,7 @@ project ├── columns: a:3!null ├── cardinality: [0 - 2] ├── key: (3) + ├── distribution: east └── locality-optimized-search ├── columns: a:3!null b:4!null ├── left columns: a:11 b:12 @@ -734,6 +747,7 @@ project ├── cardinality: [0 - 2] ├── key: (3) ├── fd: (3)-->(4), (4)-->(3) + ├── distribution: east ├── scan abc_part@b_idx │ ├── columns: a:11!null b:12!null │ ├── constraint: /9/12: [/'east'/1 - /'east'/2] @@ -753,65 +767,85 @@ project opt locality=(region=east) expect-not=GenerateLocalityOptimizedScan SELECT a FROM abc_part WHERE b >= 0 AND b < 100001 ---- -project +distribute ├── columns: a:3!null ├── cardinality: [0 - 100001] ├── key: (3) - └── select - ├── columns: a:3!null b:4!null + ├── distribution: east + ├── input distribution: east,west + └── project + ├── columns: a:3!null ├── cardinality: [0 - 100001] ├── key: (3) - ├── fd: (3)-->(4), (4)-->(3) - ├── index-join abc_part - │ ├── columns: a:3!null b:4 - │ ├── key: (3) - │ ├── fd: (3)-->(4), (4)~~>(3) - │ └── scan abc_part@c_idx - │ ├── columns: a:3!null - │ ├── constraint: /1/2/5 - │ │ ├── [/'central'/1 - /'central'/3] - │ │ ├── [/'east'/1 - /'east'/3] - │ │ └── [/'west'/1 - /'west'/3] - │ └── key: (3) - └── filters - └── (b:4 >= 0) AND 
(b:4 < 100001) [outer=(4), constraints=(/4: [/0 - /100000]; tight)] + └── select + ├── columns: a:3!null b:4!null + ├── cardinality: [0 - 100001] + ├── key: (3) + ├── fd: (3)-->(4), (4)-->(3) + ├── index-join abc_part + │ ├── columns: a:3!null b:4 + │ ├── key: (3) + │ ├── fd: (3)-->(4), (4)~~>(3) + │ └── scan abc_part@c_idx + │ ├── columns: a:3!null + │ ├── constraint: /1/2/5 + │ │ ├── [/'central'/1 - /'central'/3] + │ │ ├── [/'east'/1 - /'east'/3] + │ │ └── [/'west'/1 - /'west'/3] + │ └── key: (3) + └── filters + └── (b:4 >= 0) AND (b:4 < 100001) [outer=(4), constraints=(/4: [/0 - /100000]; tight)] # The spans target all remote partitions. opt locality=(region=east) expect-not=GenerateLocalityOptimizedScan SELECT a FROM abc_part WHERE b = 1 AND r IN ('west', 'central') ---- -project +distribute ├── columns: a:3!null ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3) - └── scan abc_part@b_idx - ├── columns: r:1!null a:3!null b:4!null - ├── constraint: /1/4 - │ ├── [/'central'/1 - /'central'/1] - │ └── [/'west'/1 - /'west'/1] + ├── distribution: east + ├── input distribution: central,west + └── project + ├── columns: a:3!null ├── cardinality: [0 - 1] ├── key: () - └── fd: ()-->(1,3,4) + ├── fd: ()-->(3) + └── scan abc_part@b_idx + ├── columns: r:1!null a:3!null b:4!null + ├── constraint: /1/4 + │ ├── [/'central'/1 - /'central'/1] + │ └── [/'west'/1 - /'west'/1] + ├── cardinality: [0 - 1] + ├── key: () + └── fd: ()-->(1,3,4) # The scan is limited. opt locality=(region=east) expect-not=GenerateLocalityOptimizedScan SELECT a FROM abc_part WHERE d = 1 LIMIT 1 ---- -project +distribute ├── columns: a:3!null ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3) - └── scan abc_part@d_idx - ├── columns: a:3!null d:6!null - ├── constraint: /1/6/3 - │ ├── [/'central'/1 - /'central'/1] - │ ├── [/'east'/1 - /'east'/1] - │ └── [/'west'/1 - /'west'/1] - ├── limit: 1 + ├── distribution: east + ├── input distribution: central,east,west + └── project + ├── columns: a:3!null + ├── cardinality: [0 - 1] ├── key: () - └── fd: ()-->(3,6) + ├── fd: ()-->(3) + └── scan abc_part@d_idx + ├── columns: a:3!null d:6!null + ├── constraint: /1/6/3 + │ ├── [/'central'/1 - /'central'/1] + │ ├── [/'east'/1 - /'east'/1] + │ └── [/'west'/1 - /'west'/1] + ├── limit: 1 + ├── key: () + └── fd: ()-->(3,6) # The scan is limited, but b is known to be a key, so the limit is discarded. 
opt locality=(region=east) expect=GenerateLocalityOptimizedScan @@ -822,6 +856,7 @@ project ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3) + ├── distribution: east └── locality-optimized-search ├── columns: a:3!null b:4!null ├── left columns: a:11 b:12 @@ -829,6 +864,7 @@ project ├── cardinality: [0 - 1] ├── key: () ├── fd: ()-->(3,4) + ├── distribution: east ├── scan abc_part@b_idx │ ├── columns: a:11!null b:12!null │ ├── constraint: /9/12: [/'east'/1 - /'east'/1] diff --git a/pkg/sql/opt/xform/testdata/rules/select b/pkg/sql/opt/xform/testdata/rules/select index a648d3633fc3..7e303ad13105 100644 --- a/pkg/sql/opt/xform/testdata/rules/select +++ b/pkg/sql/opt/xform/testdata/rules/select @@ -263,7 +263,7 @@ CREATE INDEX idx2 ON p (s) WHERE i > 0 memo expect=GeneratePartialIndexScans SELECT * FROM p WHERE i > 0 AND s = 'foo' ---- -memo (optimized, ~17KB, required=[presentation: k:1,i:2,f:3,s:4,b:5]) +memo (optimized, ~16KB, required=[presentation: k:1,i:2,f:3,s:4,b:5]) ├── G1: (select G2 G3) (index-join G4 p,cols=(1-5)) (index-join G5 p,cols=(1-5)) (index-join G6 p,cols=(1-5)) (index-join G7 p,cols=(1-5)) │ └── [presentation: k:1,i:2,f:3,s:4,b:5] │ ├── best: (index-join G4 p,cols=(1-5)) @@ -787,7 +787,7 @@ index-join b memo SELECT * FROM b WHERE v >= 1 AND v <= 10 ---- -memo (optimized, ~7KB, required=[presentation: k:1,u:2,v:3,j:4]) +memo (optimized, ~6KB, required=[presentation: k:1,u:2,v:3,j:4]) ├── G1: (select G2 G3) (index-join G4 b,cols=(1-4)) │ └── [presentation: k:1,u:2,v:3,j:4] │ ├── best: (index-join G4 b,cols=(1-4)) @@ -1710,7 +1710,7 @@ CREATE INDEX idx2 ON p (i) WHERE s = 'foo' memo SELECT i FROM p WHERE i = 3 AND s = 'foo' ---- -memo (optimized, ~21KB, required=[presentation: i:2]) +memo (optimized, ~20KB, required=[presentation: i:2]) ├── G1: (project G2 G3 i) │ └── [presentation: i:2] │ ├── best: (project G2 G3 i) @@ -4476,7 +4476,7 @@ CREATE INVERTED INDEX idx ON pi (j) WHERE s IN ('foo', 'bar') memo expect-not=GenerateInvertedIndexScans SELECT * FROM pi WHERE j @> '{"a": "b"}' AND s = 'baz' ---- -memo (optimized, ~8KB, required=[presentation: k:1,s:2,j:3]) +memo (optimized, ~7KB, required=[presentation: k:1,s:2,j:3]) ├── G1: (select G2 G3) │ └── [presentation: k:1,s:2,j:3] │ ├── best: (select G2 G3) @@ -5912,7 +5912,7 @@ select memo SELECT p,q,r,s FROM pqr WHERE q = 1 AND r = 1 AND s = 'foo' ---- -memo (optimized, ~36KB, required=[presentation: p:1,q:2,r:3,s:4]) +memo (optimized, ~35KB, required=[presentation: p:1,q:2,r:3,s:4]) ├── G1: (select G2 G3) (select G4 G5) (select G6 G7) (select G8 G9) (select G10 G9) (lookup-join G11 G12 pqr,keyCols=[1],outCols=(1-4)) (zigzag-join G3 pqr@q pqr@s) (zigzag-join G3 pqr@q pqr@rs) (lookup-join G13 G9 pqr,keyCols=[1],outCols=(1-4)) │ └── [presentation: p:1,q:2,r:3,s:4] │ ├── best: (zigzag-join G3 pqr@q pqr@s) @@ -7806,7 +7806,7 @@ CREATE TABLE t58390 ( memo SELECT * FROM t58390 WHERE a > 1 OR b > 1 ---- -memo (optimized, ~22KB, required=[presentation: k:1,a:2,b:3,c:4]) +memo (optimized, ~21KB, required=[presentation: k:1,a:2,b:3,c:4]) ├── G1: (select G2 G3) (index-join G4 t58390,cols=(1-4)) (distinct-on G5 G6 cols=(1)) (distinct-on G5 G6 cols=(1),ordering=+1) │ └── [presentation: k:1,a:2,b:3,c:4] │ ├── best: (select G2 G3) @@ -7921,7 +7921,7 @@ JOIN t61795 AS t2 ON t1.c = t1.b AND t1.b = t2.b WHERE t1.a = 10 OR t2.b != abs(t2.b) ORDER BY t1.b ASC ---- -memo (optimized, ~33KB, required=[presentation: a:1] [ordering: +2]) +memo (optimized, ~32KB, required=[presentation: a:1] [ordering: +2]) ├── G1: (project G2 G3 a b) │ ├── 
[presentation: a:1] [ordering: +2] │ │ ├── best: (sort G1) diff --git a/pkg/sql/opt/xform/testdata/rules/set b/pkg/sql/opt/xform/testdata/rules/set index 02ed119b4576..0170264f5f46 100644 --- a/pkg/sql/opt/xform/testdata/rules/set +++ b/pkg/sql/opt/xform/testdata/rules/set @@ -21,7 +21,7 @@ CREATE TABLE kuvw ( memo expect=GenerateStreamingSetOp SELECT u,v,w FROM kuvw UNION SELECT w,v,u FROM kuvw ---- -memo (optimized, ~11KB, required=[presentation: u:13,v:14,w:15]) +memo (optimized, ~10KB, required=[presentation: u:13,v:14,w:15]) ├── G1: (union G2 G3) (union G2 G3 ordering=+13,+14,+15) (union G2 G3 ordering=+15,+14,+13) (union G2 G3 ordering=+14,+15,+13) (union G2 G3 ordering=+14,+13,+15) │ └── [presentation: u:13,v:14,w:15] │ ├── best: (union G2="[ordering: +2,+3,+4]" G3="[ordering: +10,+9,+8]" ordering=+13,+14,+15) diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index 1b1c08f36482..1f8856b8594f 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" @@ -1255,7 +1256,7 @@ func (oi *optIndex) init( } // Get the partition values. - var a rowenc.DatumAlloc + var a tree.DatumAlloc for _, valueEncBuf := range values { t, _, err := rowenc.DecodePartitionTuple( &a, oi.tab.codec, oi.tab.desc, oi.idx, oi.idx.GetPartitioning(), @@ -1665,8 +1666,8 @@ type optForeignKeyConstraint struct { validity descpb.ConstraintValidity match descpb.ForeignKeyReference_Match - deleteAction descpb.ForeignKeyReference_Action - updateAction descpb.ForeignKeyReference_Action + deleteAction catpb.ForeignKeyAction + updateAction catpb.ForeignKeyAction } var _ cat.ForeignKeyConstraint = &optForeignKeyConstraint{} @@ -2301,11 +2302,11 @@ func collectTypes(col catalog.Column) (descpb.IDs, error) { // mapGeneratedAsIdentityType maps a descpb.GeneratedAsIdentityType into corresponding // cat.GeneratedAsIdentityType. This is a helper function for the read access to // the GeneratedAsIdentityType attribute for descpb.ColumnDescriptor. 
-func mapGeneratedAsIdentityType(inType descpb.GeneratedAsIdentityType) cat.GeneratedAsIdentityType { - mapGeneratedAsIdentityType := map[descpb.GeneratedAsIdentityType]cat.GeneratedAsIdentityType{ - descpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN: cat.NotGeneratedAsIdentity, - descpb.GeneratedAsIdentityType_GENERATED_ALWAYS: cat.GeneratedAlwaysAsIdentity, - descpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT: cat.GeneratedByDefaultAsIdentity, +func mapGeneratedAsIdentityType(inType catpb.GeneratedAsIdentityType) cat.GeneratedAsIdentityType { + mapGeneratedAsIdentityType := map[catpb.GeneratedAsIdentityType]cat.GeneratedAsIdentityType{ + catpb.GeneratedAsIdentityType_NOT_IDENTITY_COLUMN: cat.NotGeneratedAsIdentity, + catpb.GeneratedAsIdentityType_GENERATED_ALWAYS: cat.GeneratedAlwaysAsIdentity, + catpb.GeneratedAsIdentityType_GENERATED_BY_DEFAULT: cat.GeneratedByDefaultAsIdentity, } return mapGeneratedAsIdentityType[inType] } diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index e7fc58df7418..701c4fb03cbe 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -88,15 +88,8 @@ func (ef *execFactory) ConstructScan( scan := ef.planner.Scan() colCfg := makeScanColumnsConfig(table, params.NeededCols) - // initTable checks that the current user has the correct privilege to access - // the table. However, the privilege has already been checked in optbuilder, - // and does not need to be rechecked. In fact, it's an error to check the - // privilege if the table was originally part of a view, since lower privilege - // users might be able to access a view that uses a higher privilege table. - ef.planner.skipSelectPrivilegeChecks = true - defer func() { ef.planner.skipSelectPrivilegeChecks = false }() ctx := ef.planner.extendedEvalCtx.Ctx() - if err := scan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil { + if err := scan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -610,7 +603,7 @@ func (ef *execFactory) ConstructIndexJoin( tableScan := ef.planner.Scan() ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil { + if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -658,7 +651,7 @@ func (ef *execFactory) ConstructLookupJoin( tableScan := ef.planner.Scan() ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil { + if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } @@ -741,7 +734,7 @@ func (ef *execFactory) constructVirtualTableLookupJoin( // column analysis. 
colCfg := makeScanColumnsConfig(table, lookupCols) ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tableDesc, nil, colCfg); err != nil { + if err := tableScan.initTable(ctx, ef.planner, tableDesc, colCfg); err != nil { return nil, err } tableScan.index = idx @@ -793,7 +786,7 @@ func (ef *execFactory) ConstructInvertedJoin( tableScan := ef.planner.Scan() ctx := ef.planner.extendedEvalCtx.Ctx() - if err := tableScan.initTable(ctx, ef.planner, tabDesc, nil, colCfg); err != nil { + if err := tableScan.initTable(ctx, ef.planner, tabDesc, colCfg); err != nil { return nil, err } tableScan.index = idx @@ -853,12 +846,12 @@ func (ef *execFactory) constructScanForZigzag( } for c, ok := cols.Next(0); ok; c, ok = cols.Next(c + 1) { - colCfg.wantedColumns = append(colCfg.wantedColumns, tree.ColumnID(tableDesc.PublicColumns()[c].GetID())) + colCfg.wantedColumns = append(colCfg.wantedColumns, tableDesc.PublicColumns()[c].GetID()) } scan := ef.planner.Scan() ctx := ef.planner.extendedEvalCtx.Ctx() - if err := scan.initTable(ctx, ef.planner, tableDesc, nil, colCfg); err != nil { + if err := scan.initTable(ctx, ef.planner, tableDesc, colCfg); err != nil { return nil, err } diff --git a/pkg/sql/parser/parse_test.go b/pkg/sql/parser/parse_test.go index 8c428b7fba23..1813f20ba31e 100644 --- a/pkg/sql/parser/parse_test.go +++ b/pkg/sql/parser/parse_test.go @@ -443,6 +443,8 @@ func TestUnimplementedSyntax(t *testing.T) { {`SET CONSTRAINTS foo`, 0, `set constraints`, ``}, {`SET foo FROM CURRENT`, 0, `set from current`, ``}, + {`CREATE MATERIALIZED VIEW a AS SELECT 1 WITH NO DATA`, 74083, ``, ``}, + {`CREATE TABLE a(x INT[][])`, 32552, ``, ``}, {`CREATE TABLE a(x INT[1][2])`, 32552, ``, ``}, {`CREATE TABLE a(x INT ARRAY[1][2])`, 32552, ``, ``}, @@ -512,7 +514,6 @@ func TestUnimplementedSyntax(t *testing.T) { {`CREATE INDEX a ON b(a DESC NULLS FIRST)`, 6224, ``, ``}, {`INSERT INTO foo(a, a.b) VALUES (1,2)`, 27792, ``, ``}, - {`INSERT INTO foo VALUES (1,2) ON CONFLICT ON CONSTRAINT a DO NOTHING`, 28161, ``, ``}, {`SELECT * FROM ROWS FROM (a(b) AS (d))`, 0, `ROWS FROM with col_def_list`, ``}, diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index a25268d18d12..0e8147bfe2ed 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -819,7 +819,7 @@ func (u *sqlSymUnion) setVar() *tree.SetVar { %token NAN NAME NAMES NATURAL NEVER NEW_DB_NAME NEXT NO NOCANCELQUERY NOCONTROLCHANGEFEED %token NOCONTROLJOB NOCREATEDB NOCREATELOGIN NOCREATEROLE NOLOGIN NOMODIFYCLUSTERSETTING %token NO_INDEX_JOIN NO_ZIGZAG_JOIN NO_FULL_SCAN NONE NONVOTERS NORMAL NOT NOTHING NOTNULL -%token NOVIEWACTIVITY NOWAIT NULL NULLIF NULLS NUMERIC +%token NOVIEWACTIVITY NOVIEWACTIVITYREDACTED NOWAIT NULL NULLIF NULLS NUMERIC %token OF OFF OFFSET OID OIDS OIDVECTOR ON ONLY OPT OPTION OPTIONS OR %token ORDER ORDINALITY OTHERS OUT OUTER OVER OVERLAPS OVERLAY OWNED OWNER OPERATOR @@ -854,7 +854,7 @@ func (u *sqlSymUnion) setVar() *tree.SetVar { %token UNBOUNDED UNCOMMITTED UNION UNIQUE UNKNOWN UNLOGGED UNSPLIT %token UPDATE UPSERT UNTIL USE USER USERS USING UUID -%token VALID VALIDATE VALUE VALUES VARBIT VARCHAR VARIADIC VIEW VARYING VIEWACTIVITY VIRTUAL VISIBLE VOTERS +%token VALID VALIDATE VALUE VALUES VARBIT VARCHAR VARIADIC VIEW VARYING VIEWACTIVITY VIEWACTIVITYREDACTED VIRTUAL VISIBLE VOTERS %token WHEN WHERE WINDOW WITH WITHIN WITHOUT WORK WRITE @@ -1199,7 +1199,7 @@ func (u *sqlSymUnion) setVar() *tree.SetVar { %type <[]tree.RangePartition> range_partitions %type opt_all_clause %type 
opt_privileges_clause -%type distinct_clause +%type distinct_clause opt_with_data %type distinct_on_clause %type opt_column_list insert_column_list opt_stats_columns query_stats_cols %type sort_clause single_sort_clause opt_sort_clause @@ -7634,7 +7634,7 @@ create_view_stmt: Replace: false, } } -| CREATE MATERIALIZED VIEW view_name opt_column_list AS select_stmt +| CREATE MATERIALIZED VIEW view_name opt_column_list AS select_stmt opt_with_data { name := $4.unresolvedObjectName().ToTableName() $$.val = &tree.CreateView{ @@ -7644,7 +7644,7 @@ create_view_stmt: Materialized: true, } } -| CREATE MATERIALIZED VIEW IF NOT EXISTS view_name opt_column_list AS select_stmt +| CREATE MATERIALIZED VIEW IF NOT EXISTS view_name opt_column_list AS select_stmt opt_with_data { name := $7.unresolvedObjectName().ToTableName() $$.val = &tree.CreateView{ @@ -7657,6 +7657,20 @@ create_view_stmt: } | CREATE opt_temp opt_view_recursive VIEW error // SHOW HELP: CREATE VIEW +opt_with_data: + WITH NO DATA error + { + return unimplementedWithIssue(sqllex, 74083) + } +| WITH DATA + { + $$.val = true + } +| /* EMPTY */ + { + $$.val = true + } + role_option: CREATEROLE { @@ -7714,6 +7728,14 @@ role_option: { $$.val = tree.KVOption{Key: tree.Name($1), Value: nil} } +| VIEWACTIVITYREDACTED + { + $$.val = tree.KVOption{Key: tree.Name($1), Value: nil} + } +| NOVIEWACTIVITYREDACTED + { + $$.val = tree.KVOption{Key: tree.Name($1), Value: nil} + } | CANCELQUERY { $$.val = tree.KVOption{Key: tree.Name($1), Value: nil} @@ -9054,7 +9076,21 @@ on_conflict: Where: tree.NewWhere(tree.AstWhere, $11.expr()), } } -| ON CONFLICT ON CONSTRAINT constraint_name { return unimplementedWithIssue(sqllex, 28161) } +| ON CONFLICT ON CONSTRAINT constraint_name DO NOTHING + { + $$.val = &tree.OnConflict{ + Constraint: tree.Name($5), + DoNothing: true, + } + } +| ON CONFLICT ON CONSTRAINT constraint_name DO UPDATE SET set_clause_list opt_where_clause + { + $$.val = &tree.OnConflict{ + Constraint: tree.Name($5), + Exprs: $9.updateExprs(), + Where: tree.NewWhere(tree.AstWhere, $10.expr()), + } + } returning_clause: RETURNING target_list @@ -13437,6 +13473,7 @@ unreserved_keyword: | NOMODIFYCLUSTERSETTING | NONVOTERS | NOVIEWACTIVITY +| NOVIEWACTIVITYREDACTED | NOWAIT | NULLS | IGNORE_FOREIGN_KEYS @@ -13595,6 +13632,7 @@ unreserved_keyword: | VARYING | VIEW | VIEWACTIVITY +| VIEWACTIVITYREDACTED | VISIBLE | VOTERS | WITHIN diff --git a/pkg/sql/parser/testdata/create_view b/pkg/sql/parser/testdata/create_view index 42526eb5bed4..90817688e860 100644 --- a/pkg/sql/parser/testdata/create_view +++ b/pkg/sql/parser/testdata/create_view @@ -104,6 +104,14 @@ CREATE MATERIALIZED VIEW a AS SELECT (*) FROM b -- fully parenthesized CREATE MATERIALIZED VIEW a AS SELECT * FROM b -- literals removed CREATE MATERIALIZED VIEW _ AS SELECT * FROM _ -- identifiers removed +parse +CREATE MATERIALIZED VIEW a AS SELECT * FROM b WITH DATA +---- +CREATE MATERIALIZED VIEW a AS SELECT * FROM b -- normalized! 
+CREATE MATERIALIZED VIEW a AS SELECT (*) FROM b -- fully parenthesized +CREATE MATERIALIZED VIEW a AS SELECT * FROM b -- literals removed +CREATE MATERIALIZED VIEW _ AS SELECT * FROM _ -- identifiers removed + parse CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT * FROM b ---- @@ -112,6 +120,14 @@ CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT (*) FROM b -- fully parenthes CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT * FROM b -- literals removed CREATE MATERIALIZED VIEW IF NOT EXISTS _ AS SELECT * FROM _ -- identifiers removed +parse +CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT * FROM b WITH DATA +---- +CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT * FROM b -- normalized! +CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT (*) FROM b -- fully parenthesized +CREATE MATERIALIZED VIEW IF NOT EXISTS a AS SELECT * FROM b -- literals removed +CREATE MATERIALIZED VIEW IF NOT EXISTS _ AS SELECT * FROM _ -- identifiers removed + parse REFRESH MATERIALIZED VIEW a.b ---- diff --git a/pkg/sql/parser/testdata/upsert b/pkg/sql/parser/testdata/upsert index 5a1c30848a89..6418d1860c06 100644 --- a/pkg/sql/parser/testdata/upsert +++ b/pkg/sql/parser/testdata/upsert @@ -221,3 +221,19 @@ INSERT INTO a VALUES (1) ON CONFLICT (a) DO UPDATE SET (a, b) = (SELECT 1, 2) RE INSERT INTO a VALUES ((1)) ON CONFLICT (a) DO UPDATE SET (a, b) = ((SELECT (1), (2))) RETURNING NOTHING -- fully parenthesized INSERT INTO a VALUES (_) ON CONFLICT (a) DO UPDATE SET (a, b) = (SELECT _, _) RETURNING NOTHING -- literals removed INSERT INTO _ VALUES (1) ON CONFLICT (_) DO UPDATE SET (_, _) = (SELECT 1, 2) RETURNING NOTHING -- identifiers removed + +parse +INSERT INTO a VALUES (1) ON CONFLICT ON CONSTRAINT "foo" DO UPDATE SET (a, b) = (SELECT 1, 2) RETURNING NOTHING +---- +INSERT INTO a VALUES (1) ON CONFLICT ON CONSTRAINT foo DO UPDATE SET (a, b) = (SELECT 1, 2) RETURNING NOTHING -- normalized! +INSERT INTO a VALUES ((1)) ON CONFLICT ON CONSTRAINT foo DO UPDATE SET (a, b) = ((SELECT (1), (2))) RETURNING NOTHING -- fully parenthesized +INSERT INTO a VALUES (_) ON CONFLICT ON CONSTRAINT foo DO UPDATE SET (a, b) = (SELECT _, _) RETURNING NOTHING -- literals removed +INSERT INTO _ VALUES (1) ON CONFLICT ON CONSTRAINT _ DO UPDATE SET (_, _) = (SELECT 1, 2) RETURNING NOTHING -- identifiers removed + +parse +INSERT INTO a VALUES (1) ON CONFLICT ON CONSTRAINT "foo" DO NOTHING +---- +INSERT INTO a VALUES (1) ON CONFLICT ON CONSTRAINT foo DO NOTHING -- normalized! +INSERT INTO a VALUES ((1)) ON CONFLICT ON CONSTRAINT foo DO NOTHING -- fully parenthesized +INSERT INTO a VALUES (_) ON CONFLICT ON CONSTRAINT foo DO NOTHING -- literals removed +INSERT INTO _ VALUES (1) ON CONFLICT ON CONSTRAINT _ DO NOTHING -- identifiers removed diff --git a/pkg/sql/partition.go b/pkg/sql/partition.go index 3164452e425c..26e824e3a3db 100644 --- a/pkg/sql/partition.go +++ b/pkg/sql/partition.go @@ -58,7 +58,7 @@ func partitionByFromTableDescImpl( } // Copy the LIST of the PARTITION BY clause. 
- a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} err := part.ForEachList(func(name string, values [][]byte, subPartitioning catalog.Partitioning) (err error) { lp := tree.ListPartition{ Name: tree.UnrestrictedName(name), diff --git a/pkg/sql/partition_utils.go b/pkg/sql/partition_utils.go index e10c1241f63b..e4d67b5ddfc8 100644 --- a/pkg/sql/partition_utils.go +++ b/pkg/sql/partition_utils.go @@ -95,7 +95,7 @@ func GenerateSubzoneSpans( return nil, nil } - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} subzoneIndexByIndexID := make(map[descpb.IndexID]int32) subzoneIndexByPartition := make(map[string]int32) @@ -182,7 +182,7 @@ func GenerateSubzoneSpans( // highest precedence first and the interval.Range payloads are each a // `zonepb.Subzone` with the PartitionName set. func indexCoveringsForPartitioning( - a *rowenc.DatumAlloc, + a *tree.DatumAlloc, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, idx catalog.Index, diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 634abdbecfce..789e2a4174ea 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -27,11 +27,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catformat" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/commenter" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" @@ -779,12 +781,12 @@ var ( fkActionSetNull = tree.NewDString("n") fkActionSetDefault = tree.NewDString("d") - fkActionMap = map[descpb.ForeignKeyReference_Action]tree.Datum{ - descpb.ForeignKeyReference_NO_ACTION: fkActionNone, - descpb.ForeignKeyReference_RESTRICT: fkActionRestrict, - descpb.ForeignKeyReference_CASCADE: fkActionCascade, - descpb.ForeignKeyReference_SET_NULL: fkActionSetNull, - descpb.ForeignKeyReference_SET_DEFAULT: fkActionSetDefault, + fkActionMap = map[catpb.ForeignKeyAction]tree.Datum{ + catpb.ForeignKeyAction_NO_ACTION: fkActionNone, + catpb.ForeignKeyAction_RESTRICT: fkActionRestrict, + catpb.ForeignKeyAction_CASCADE: fkActionCascade, + catpb.ForeignKeyAction_SET_NULL: fkActionSetNull, + catpb.ForeignKeyAction_SET_DEFAULT: fkActionSetDefault, } fkMatchTypeFull = tree.NewDString("f") @@ -1518,7 +1520,7 @@ https://www.postgresql.org/docs/9.5/catalog-pg-description.html`, objID := comment[0] objSubID := comment[1] description := comment[2] - commentType := tree.MustBeDInt(comment[3]) + commentType := keys.CommentType(tree.MustBeDInt(comment[3])) classOid := oidZero @@ -1566,7 +1568,7 @@ https://www.postgresql.org/docs/9.5/catalog-pg-shdescription.html`, return err } for _, comment := range comments { - commentType := tree.MustBeDInt(comment[3]) + commentType := keys.CommentType(tree.MustBeDInt(comment[3])) if commentType != keys.DatabaseCommentType { // Only database comments are exported in this table. continue @@ -4369,3 +4371,8 @@ func stringOid(s string) *tree.DOid { h.writeStr(s) return h.getOid() } + +//MakeConstraintOidBuilder constructs an OID builder. 
+func MakeConstraintOidBuilder() commenter.ConstraintOidBuilder { + return makeOidHasher() +} diff --git a/pkg/sql/pgwire/BUILD.bazel b/pkg/sql/pgwire/BUILD.bazel index 5544eca765e6..a43d461f3e71 100644 --- a/pkg/sql/pgwire/BUILD.bazel +++ b/pkg/sql/pgwire/BUILD.bazel @@ -62,7 +62,7 @@ go_library( "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_logtags//:logtags", "@com_github_cockroachdb_redact//:redact", @@ -129,7 +129,7 @@ go_test( "//pkg/util/randutil", "//pkg/util/timeutil", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//stdstrings", diff --git a/pkg/sql/pgwire/encoding_test.go b/pkg/sql/pgwire/encoding_test.go index ef23ad656810..e2b3af12e9a0 100644 --- a/pkg/sql/pgwire/encoding_test.go +++ b/pkg/sql/pgwire/encoding_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" "github.com/cockroachdb/cockroach/pkg/sql/colconv" diff --git a/pkg/sql/pgwire/testdata/pgtest/notice b/pkg/sql/pgwire/testdata/pgtest/notice index c3f3b48f8328..5dc5c3a21bf9 100644 --- a/pkg/sql/pgwire/testdata/pgtest/notice +++ b/pkg/sql/pgwire/testdata/pgtest/notice @@ -55,7 +55,7 @@ Query {"String": "DROP INDEX t_x_idx"} until crdb_only CommandComplete ---- -{"Severity":"NOTICE","SeverityUnlocalized":"","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":538,"Routine":"dropIndexByName","UnknownFields":null} +{"Severity":"NOTICE","SeverityUnlocalized":"","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":545,"Routine":"dropIndexByName","UnknownFields":null} {"Type":"CommandComplete","CommandTag":"DROP INDEX"} until noncrdb_only diff --git a/pkg/sql/pgwire/types.go b/pkg/sql/pgwire/types.go index c3059f6c9687..76f965fb5a1c 100644 --- a/pkg/sql/pgwire/types.go +++ b/pkg/sql/pgwire/types.go @@ -14,13 +14,12 @@ import ( "context" "encoding/binary" "math" - "math/big" "net" "strconv" "strings" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/lex" @@ -418,7 +417,7 @@ func writeBinaryDecimal(b *writeBuffer, v *apd.Decimal) { alloc := struct { pgNum pgwirebase.PGNumeric - bigI big.Int + bigI apd.BigInt }{ pgNum: pgwirebase.PGNumeric{ // Since we use 2000 as the exponent limits in tree.DecimalCtx, this diff --git a/pkg/sql/physicalplan/BUILD.bazel b/pkg/sql/physicalplan/BUILD.bazel index 5f8a224e4e13..2f5bff8910d5 
100644 --- a/pkg/sql/physicalplan/BUILD.bazel +++ b/pkg/sql/physicalplan/BUILD.bazel @@ -75,6 +75,7 @@ go_test( "//pkg/util/log", "//pkg/util/randutil", "//pkg/util/uuid", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/physicalplan/aggregator_funcs_test.go b/pkg/sql/physicalplan/aggregator_funcs_test.go index a77318748c36..8e0549659681 100644 --- a/pkg/sql/physicalplan/aggregator_funcs_test.go +++ b/pkg/sql/physicalplan/aggregator_funcs_test.go @@ -14,9 +14,9 @@ import ( "context" "fmt" "math" - "math/big" "testing" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" @@ -47,7 +47,7 @@ var ( // decimal calculations. diffCtx = tree.DecimalCtx.WithPrecision(0) // Use to check for 1ulp. - bigOne = big.NewInt(1) + bigOne = apd.NewBigInt(1) // floatPrecFmt is the format string with a precision of 3 (after // decimal point) specified for float comparisons. Float aggregation // operations involve unavoidable off-by-last-few-digits errors, which diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index 93dedf8e025d..262cade7561e 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -100,7 +100,7 @@ type PlanHookState interface { CreateSchemaNamespaceEntry(ctx context.Context, schemaNameKey roachpb.Key, schemaID descpb.ID) error MigrationJobDeps() migration.JobDeps - SpanConfigReconciliationJobDeps() spanconfig.ReconciliationDependencies + SpanConfigReconciler() spanconfig.Reconciler BufferClientNotice(ctx context.Context, notice pgnotice.Notice) } diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 3901aeb0de95..fca148585d47 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/querycache" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -192,10 +191,6 @@ type planner struct { // 2. Disable the use of the table cache in tests. avoidLeasedDescriptors bool - // If set, the planner should skip checking for the SELECT privilege when - // initializing plans to read from a table. This should be used with care. - skipSelectPrivilegeChecks bool - // autoCommit indicates whether we're planning for an implicit transaction. // If autoCommit is true, the plan is allowed (but not required) to commit the // transaction along with other KV operations. Committing the txn might be @@ -225,7 +220,7 @@ type planner struct { // Use a common datum allocator across all the plan nodes. This separates the // plan lifetime from the lifetime of returned results allowing plan nodes to // be pool allocated. - alloc *rowenc.DatumAlloc + alloc *tree.DatumAlloc // optPlanningCtx stores the optimizer planning context, which contains // data structures that can be reused between queries (for efficiency). 
@@ -343,7 +338,7 @@ func newInternalPlanner( ts = readTimestamp.GoTime() } - p := &planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}} + p := &planner{execCfg: execCfg, alloc: &tree.DatumAlloc{}} p.txn = txn p.stmt = Statement{} @@ -562,9 +557,9 @@ func (p *planner) MigrationJobDeps() migration.JobDeps { return p.execCfg.MigrationJobDeps } -// SpanConfigReconciliationJobDeps returns the spanconfig.ReconciliationJobDeps. -func (p *planner) SpanConfigReconciliationJobDeps() spanconfig.ReconciliationDependencies { - return p.execCfg.SpanConfigReconciliationJobDeps +// SpanConfigReconciler returns the spanconfig.Reconciler. +func (p *planner) SpanConfigReconciler() spanconfig.Reconciler { + return p.execCfg.SpanConfigReconciler } // GetTypeFromValidSQLSyntax implements the tree.EvalPlanner interface. diff --git a/pkg/sql/planner_test.go b/pkg/sql/planner_test.go index d83ac296c160..0b3a34346638 100644 --- a/pkg/sql/planner_test.go +++ b/pkg/sql/planner_test.go @@ -15,7 +15,6 @@ import ( "reflect" "testing" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -25,7 +24,7 @@ func TestTypeAsString(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() - p := planner{alloc: &rowenc.DatumAlloc{}} + p := planner{alloc: &tree.DatumAlloc{}} testData := []struct { expr tree.Expr expected string diff --git a/pkg/sql/privilege/privilege.go b/pkg/sql/privilege/privilege.go index 38de4d58b6e6..2d9f9485ecc3 100644 --- a/pkg/sql/privilege/privilege.go +++ b/pkg/sql/privilege/privilege.go @@ -45,6 +45,17 @@ const ( RULE Kind = 12 ) +// Privilege represents a privilege parsed from an Access Privilege Inquiry +// Function's privilege string argument. +type Privilege struct { + Kind Kind + // Each privilege Kind has an optional "grant option" flag associated with + // it. A role can only grant a privilege on an object to others if it is the + // owner of the object or if it itself holds that privilege WITH GRANT OPTION + // on the object. This replaces the CockroachDB-specific GRANT privilege. + GrantOption bool +} + // ObjectType represents objects that can have privileges. 
type ObjectType string diff --git a/pkg/sql/randgen/BUILD.bazel b/pkg/sql/randgen/BUILD.bazel index 6e536f70b573..9ac9f8484fb0 100644 --- a/pkg/sql/randgen/BUILD.bazel +++ b/pkg/sql/randgen/BUILD.bazel @@ -26,6 +26,8 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/parser", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/stats", "//pkg/sql/types", @@ -40,7 +42,6 @@ go_library( "//pkg/util/timeutil/pgdate", "//pkg/util/uint128", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_lib_pq//oid", ], diff --git a/pkg/sql/randgen/datum.go b/pkg/sql/randgen/datum.go index ef5cbf26884c..ae2b8cb17249 100644 --- a/pkg/sql/randgen/datum.go +++ b/pkg/sql/randgen/datum.go @@ -13,13 +13,11 @@ package randgen import ( "bytes" "math" - "math/big" "math/bits" "math/rand" "time" "unicode" - "github.com/cockroachdb/apd/v2" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geogen" "github.com/cockroachdb/cockroach/pkg/geo/geopb" @@ -345,11 +343,9 @@ func RandDatumSimple(rng *rand.Rand, typ *types.T) tree.Datum { date, _ := pgdate.MakeDateFromPGEpoch(rng.Int31n(simpleRange)) datum = tree.NewDDate(date) case types.DecimalFamily: - datum = &tree.DDecimal{ - Decimal: apd.Decimal{ - Coeff: *big.NewInt(rng.Int63n(simpleRange)), - }, - } + dd := &tree.DDecimal{} + dd.SetInt64(rng.Int63n(simpleRange)) + datum = dd case types.IntFamily: datum = tree.NewDInt(tree.DInt(rng.Intn(simpleRange))) case types.IntervalFamily: diff --git a/pkg/sql/randgen/mutator.go b/pkg/sql/randgen/mutator.go index 29c4972cc28c..92548cde2651 100644 --- a/pkg/sql/randgen/mutator.go +++ b/pkg/sql/randgen/mutator.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -301,7 +302,7 @@ func randHistogram(rng *rand.Rand, colType *types.T) stats.HistogramData { encs := encodeInvertedIndexHistogramUpperBounds(colType, upper) encodedUpperBounds = append(encodedUpperBounds, encs...) } else { - enc, err := rowenc.EncodeTableKey(nil, upper, encoding.Ascending) + enc, err := keyside.Encode(nil, upper, encoding.Ascending) if err != nil { panic(err) } @@ -368,11 +369,11 @@ func encodeInvertedIndexHistogramUpperBounds(colType *types.T, val tree.Datum) ( panic(err) } - var da rowenc.DatumAlloc + var da tree.DatumAlloc for i := range keys { // Each key much be a byte-encoded datum so that it can be // decoded in JSONStatistic.SetHistogram. 
- enc, err := rowenc.EncodeTableKey(nil, da.NewDBytes(tree.DBytes(keys[i])), encoding.Ascending) + enc, err := keyside.Encode(nil, da.NewDBytes(tree.DBytes(keys[i])), encoding.Ascending) if err != nil { panic(err) } diff --git a/pkg/sql/randgen/schema.go b/pkg/sql/randgen/schema.go index 0aea89cf1ca4..a5c73399cc49 100644 --- a/pkg/sql/randgen/schema.go +++ b/pkg/sql/randgen/schema.go @@ -395,7 +395,7 @@ func TestingMakePrimaryIndexKeyForTenant( colIDToRowIndex.Set(index.GetKeyColumnID(i), i) } - keyPrefix := rowenc.MakeIndexKeyPrefix(codec, desc, index.GetID()) + keyPrefix := rowenc.MakeIndexKeyPrefix(codec, desc.GetID(), index.GetID()) key, _, err := rowenc.EncodeIndexKey(desc, index, colIDToRowIndex, datums, keyPrefix) if err != nil { return nil, err diff --git a/pkg/sql/randgen/type.go b/pkg/sql/randgen/type.go index 03ef7ebc1864..eaf22fa02110 100644 --- a/pkg/sql/randgen/type.go +++ b/pkg/sql/randgen/type.go @@ -15,7 +15,7 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/lib/pq/oid" ) @@ -69,7 +69,7 @@ func init() { // IsAllowedForArray returns true iff the passed in type can be a valid ArrayContents() func IsAllowedForArray(typ *types.T) bool { // Don't include un-encodable types. - encTyp, err := rowenc.DatumTypeToArrayElementEncodingType(typ) + encTyp, err := valueside.DatumTypeToArrayElementEncodingType(typ) if err != nil || encTyp == 0 { return false } diff --git a/pkg/sql/region_util.go b/pkg/sql/region_util.go index cd592143da52..ba9bb1022937 100644 --- a/pkg/sql/region_util.go +++ b/pkg/sql/region_util.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -37,10 +38,10 @@ import ( // LiveClusterRegions is a set representing regions that are live in // a given cluster. -type LiveClusterRegions map[descpb.RegionName]struct{} +type LiveClusterRegions map[catpb.RegionName]struct{} // IsActive returns whether the given region is a live region. -func (s *LiveClusterRegions) IsActive(region descpb.RegionName) bool { +func (s *LiveClusterRegions) IsActive(region catpb.RegionName) bool { _, ok := (*s)[region] return ok } @@ -81,11 +82,11 @@ func GetLiveClusterRegions(ctx context.Context, p PlanHookState) (LiveClusterReg return nil, err } - var ret LiveClusterRegions = make(map[descpb.RegionName]struct{}) + var ret LiveClusterRegions = make(map[catpb.RegionName]struct{}) var ok bool for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { row := it.Cur() - ret[descpb.RegionName(*row[0].(*tree.DString))] = struct{}{} + ret[catpb.RegionName(*row[0].(*tree.DString))] = struct{}{} } if err != nil { return nil, err @@ -96,7 +97,7 @@ func GetLiveClusterRegions(ctx context.Context, p PlanHookState) (LiveClusterReg // CheckClusterRegionIsLive checks whether a region supplied is one of the // currently active cluster regions. 
func CheckClusterRegionIsLive( - liveClusterRegions LiveClusterRegions, region descpb.RegionName, + liveClusterRegions LiveClusterRegions, region catpb.RegionName, ) error { if !liveClusterRegions.IsActive(region) { return errors.WithHintf( @@ -112,7 +113,7 @@ func CheckClusterRegionIsLive( return nil } -func makeRequiredConstraintForRegion(r descpb.RegionName) zonepb.Constraint { +func makeRequiredConstraintForRegion(r catpb.RegionName) zonepb.Constraint { return zonepb.Constraint{ Type: zonepb.Constraint_REQUIRED, Key: "region", @@ -191,7 +192,7 @@ func zoneConfigForMultiRegionDatabase( // the attributes `num_replicas` and `constraints` will be inherited from the // database level zone config. func zoneConfigForMultiRegionPartition( - partitionRegion descpb.RegionName, regionConfig multiregion.RegionConfig, + partitionRegion catpb.RegionName, regionConfig multiregion.RegionConfig, ) (zonepb.ZoneConfig, error) { numVoters, _ := getNumVotersAndNumReplicas(regionConfig) zc := zonepb.NewZoneConfig() @@ -278,7 +279,7 @@ func getNumVotersAndNumReplicas( // Under region survivability, we will constrain exactly voting // replicas in the primary/home region. func synthesizeVoterConstraints( - region descpb.RegionName, regionConfig multiregion.RegionConfig, + region catpb.RegionName, regionConfig multiregion.RegionConfig, ) ([]zonepb.ConstraintsConjunction, error) { numVoters, _ := getNumVotersAndNumReplicas(regionConfig) switch regionConfig.SurvivalGoal() { @@ -380,7 +381,7 @@ func synthesizeVoterConstraints( // `zonepb.MultiRegionZoneConfigFields`) will be overwritten by the calling function // into an existing ZoneConfig. func zoneConfigForMultiRegionTable( - localityConfig descpb.TableDescriptor_LocalityConfig, regionConfig multiregion.RegionConfig, + localityConfig catpb.LocalityConfig, regionConfig multiregion.RegionConfig, ) (*zonepb.ZoneConfig, error) { // We only care about NumVoters here at the table level. NumReplicas is set at // the database level, not at the table/partition level. @@ -388,7 +389,7 @@ func zoneConfigForMultiRegionTable( ret := zonepb.NewZoneConfig() switch l := localityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_Global_: // Enable non-blocking transactions. ret.GlobalReads = proto.Bool(true) @@ -424,7 +425,7 @@ func zoneConfigForMultiRegionTable( // Inherit lease preference from the database. We do // nothing here because `NewZoneConfig()` already marks the field as // 'inherited'. - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByTable_: if l.RegionalByTable.Region == nil { // If we don't have an explicit primary // region, use the same configuration as the database and return a blank @@ -447,7 +448,7 @@ func zoneConfigForMultiRegionTable( ret.LeasePreferences = []zonepb.LeasePreference{ {Constraints: []zonepb.Constraint{makeRequiredConstraintForRegion(preferredRegion)}}, } - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: // We purposely do not set anything here at table level - this should be done at // partition level instead. return ret, nil @@ -532,7 +533,7 @@ func isPlaceholderZoneConfigForMultiRegion(zc zonepb.ZoneConfig) bool { // applyZoneConfigForMultiRegionTableOptionTableNewConfig applies table zone // configs on the entire table with the given new locality config. 
func applyZoneConfigForMultiRegionTableOptionTableNewConfig( - newConfig descpb.TableDescriptor_LocalityConfig, + newConfig catpb.LocalityConfig, ) applyZoneConfigForMultiRegionTableOption { return func( zc zonepb.ZoneConfig, @@ -1156,7 +1157,7 @@ func SynthesizeRegionConfig( return multiregion.RegionConfig{}, err } - var regionNames descpb.RegionNames + var regionNames catpb.RegionNames if o.forValidation { regionNames, err = regionEnum.RegionNamesForValidation() } else { @@ -1346,7 +1347,7 @@ func (p *planner) CheckZoneConfigChangePermittedForMultiRegion( type zoneConfigForMultiRegionValidator interface { getExpectedDatabaseZoneConfig() (zonepb.ZoneConfig, error) getExpectedTableZoneConfig(desc catalog.TableDescriptor) (zonepb.ZoneConfig, error) - transitioningRegions() descpb.RegionNames + transitioningRegions() catpb.RegionNames newMismatchFieldError(descType string, descName string, field string) error newMissingSubzoneError(descType string, descName string, field string) error @@ -1367,7 +1368,7 @@ func (v *zoneConfigForMultiRegionValidatorSetInitialRegion) getExpectedDatabaseZ return *zonepb.NewZoneConfig(), nil } -func (v *zoneConfigForMultiRegionValidatorSetInitialRegion) transitioningRegions() descpb.RegionNames { +func (v *zoneConfigForMultiRegionValidatorSetInitialRegion) transitioningRegions() catpb.RegionNames { // There are no transitioning regions at setup time. return nil } @@ -1458,7 +1459,7 @@ func (v *zoneConfigForMultiRegionValidatorExistingMultiRegionObject) getExpected return expectedZoneConfig, err } -func (v *zoneConfigForMultiRegionValidatorExistingMultiRegionObject) transitioningRegions() descpb.RegionNames { +func (v *zoneConfigForMultiRegionValidatorExistingMultiRegionObject) transitioningRegions() catpb.RegionNames { return v.regionConfig.TransitioningRegions() } diff --git a/pkg/sql/region_util_test.go b/pkg/sql/region_util_test.go index ad0973cb2329..47289207aa13 100644 --- a/pkg/sql/region_util_test.go +++ b/pkg/sql/region_util_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -32,7 +33,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "one region, zone survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_a", }, "region_a", @@ -71,7 +72,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "two regions, zone survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_a", }, @@ -117,7 +118,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "three regions, zone survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -170,7 +171,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "three regions, region survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -223,7 +224,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "four regions, zone survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -283,7 +284,7 @@ func 
TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "four regions, region survival", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -344,7 +345,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "one region, restricted placement", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_a", }, "region_a", @@ -376,7 +377,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { { desc: "four regions, restricted placement", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_a", "region_b", "region_c", @@ -419,7 +420,7 @@ func TestZoneConfigForMultiRegionDatabase(t *testing.T) { } } -func protoRegionName(region descpb.RegionName) *descpb.RegionName { +func protoRegionName(region catpb.RegionName) *catpb.RegionName { return ®ion } @@ -428,19 +429,19 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { testCases := []struct { desc string - localityConfig descpb.TableDescriptor_LocalityConfig + localityConfig catpb.LocalityConfig regionConfig multiregion.RegionConfig expected zonepb.ZoneConfig }{ { desc: "4-region global table with zone survival", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_Global_{ - Global: &descpb.TableDescriptor_LocalityConfig_Global{}, + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_Global_{ + Global: &catpb.LocalityConfig_Global{}, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -459,13 +460,13 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region global table with region survival", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_Global_{ - Global: &descpb.TableDescriptor_LocalityConfig_Global{}, + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_Global_{ + Global: &catpb.LocalityConfig_Global{}, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -484,13 +485,13 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region regional by row table with zone survival", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByRow_{ - RegionalByRow: &descpb.TableDescriptor_LocalityConfig_RegionalByRow{}, + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByRow_{ + RegionalByRow: &catpb.LocalityConfig_RegionalByRow{}, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -505,13 +506,13 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region regional by row table with region survival", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByRow_{ - RegionalByRow: &descpb.TableDescriptor_LocalityConfig_RegionalByRow{}, + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByRow_{ + RegionalByRow: &catpb.LocalityConfig_RegionalByRow{}, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -526,15 +527,15 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) 
{ }, { desc: "4-region regional by table with zone survival on primary region", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByTable_{ - RegionalByTable: &descpb.TableDescriptor_LocalityConfig_RegionalByTable{ + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByTable_{ + RegionalByTable: &catpb.LocalityConfig_RegionalByTable{ Region: nil, }, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -549,15 +550,15 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region regional by table with regional survival on primary region", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByTable_{ - RegionalByTable: &descpb.TableDescriptor_LocalityConfig_RegionalByTable{ + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByTable_{ + RegionalByTable: &catpb.LocalityConfig_RegionalByTable{ Region: nil, }, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -572,15 +573,15 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region regional by table with zone survival on non primary region", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByTable_{ - RegionalByTable: &descpb.TableDescriptor_LocalityConfig_RegionalByTable{ + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByTable_{ + RegionalByTable: &catpb.LocalityConfig_RegionalByTable{ Region: protoRegionName("region_c"), }, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -614,15 +615,15 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region regional by table with regional survival on non primary region", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_RegionalByTable_{ - RegionalByTable: &descpb.TableDescriptor_LocalityConfig_RegionalByTable{ + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_RegionalByTable_{ + RegionalByTable: &catpb.LocalityConfig_RegionalByTable{ Region: protoRegionName("region_c"), }, }, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -657,11 +658,11 @@ func TestZoneConfigForMultiRegionTable(t *testing.T) { }, { desc: "4-region global table with restricted placement", - localityConfig: descpb.TableDescriptor_LocalityConfig{ - Locality: &descpb.TableDescriptor_LocalityConfig_Global_{}, + localityConfig: catpb.LocalityConfig{ + Locality: &catpb.LocalityConfig_Global_{}, }, regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", @@ -729,7 +730,7 @@ func TestZoneConfigForMultiRegionPartition(t *testing.T) { testCases := []struct { desc string - region descpb.RegionName + region catpb.RegionName regionConfig multiregion.RegionConfig expected zonepb.ZoneConfig }{ @@ -737,7 +738,7 @@ func TestZoneConfigForMultiRegionPartition(t *testing.T) { desc: "4-region table with zone survivability", region: "region_a", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", 
"region_c", "region_a", @@ -773,7 +774,7 @@ func TestZoneConfigForMultiRegionPartition(t *testing.T) { desc: "4-region table with region survivability", region: "region_a", regionConfig: multiregion.MakeRegionConfig( - descpb.RegionNames{ + catpb.RegionNames{ "region_b", "region_c", "region_a", diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index a6ddecf5e1ef..1400091ae27e 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -304,7 +304,7 @@ func (p *planner) HasPrivilege( ctx context.Context, specifier tree.HasPrivilegeSpecifier, user security.SQLUsername, - kind privilege.Kind, + priv privilege.Privilege, ) (bool, error) { desc, err := p.ResolveDescriptorForPrivilegeSpecifier( ctx, @@ -315,32 +315,42 @@ func (p *planner) HasPrivilege( } // hasPrivilegeFunc checks whether any role has the given privilege. - hasPrivilegeFunc := func(priv privilege.Kind) (bool, error) { - err := p.CheckPrivilegeForUser(ctx, desc, priv, user) + hasPrivilegeFunc := func(priv privilege.Privilege) (bool, error) { + err := p.CheckPrivilegeForUser(ctx, desc, priv.Kind, user) + if err == nil { + if priv.GrantOption { + if !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.ValidateGrantOption) { + err = p.CheckPrivilegeForUser(ctx, desc, privilege.GRANT, user) + } else { + err = p.CheckGrantOptionsForUser(ctx, desc, []privilege.Kind{priv.Kind}, true /* isGrant */) + } + } + } if err != nil { if pgerror.GetPGCode(err) == pgcode.InsufficientPrivilege { return false, nil } return false, err } + return true, nil } - if kind == privilege.RULE { + if priv.Kind == privilege.RULE { // RULE was only added for compatibility with Postgres, and Postgres // never allows RULE to be granted, even if the user has ALL privileges. // See https://www.postgresql.org/docs/8.1/sql-grant.html // and https://www.postgresql.org/docs/release/8.2.0/. 
return false, nil } - hasPrivilege, err := hasPrivilegeFunc(privilege.ALL) + hasPrivilege, err := hasPrivilegeFunc(privilege.Privilege{Kind: privilege.ALL}) if err != nil { return false, err } if hasPrivilege { return true, nil } - return hasPrivilegeFunc(kind) + return hasPrivilegeFunc(priv) } // ResolveDescriptorForPrivilegeSpecifier resolves a tree.HasPrivilegeSpecifier @@ -348,28 +358,51 @@ func (p *planner) HasPrivilege( func (p *planner) ResolveDescriptorForPrivilegeSpecifier( ctx context.Context, specifier tree.HasPrivilegeSpecifier, ) (catalog.Descriptor, error) { - if specifier.TableName != nil { - tn, err := parser.ParseQualifiedTableName(*specifier.TableName) - if err != nil { - return nil, err - } - if _, err := p.ResolveTableName(ctx, tn); err != nil { - return nil, err - } + if specifier.DatabaseName != nil { + return p.Descriptors().GetImmutableDatabaseByName( + ctx, p.txn, *specifier.DatabaseName, tree.DatabaseLookupFlags{Required: true}, + ) + } else if specifier.DatabaseOID != nil { + _, database, err := p.Descriptors().GetImmutableDatabaseByID( + ctx, p.txn, descpb.ID(*specifier.DatabaseOID), tree.DatabaseLookupFlags{Required: true}, + ) + return database, err + } else if specifier.TableName != nil || specifier.TableOID != nil { + var table catalog.TableDescriptor + var err error + if specifier.TableName != nil { + var tn *tree.TableName + tn, err = parser.ParseQualifiedTableName(*specifier.TableName) + if err != nil { + return nil, err + } + if _, err = p.ResolveTableName(ctx, tn); err != nil { + return nil, err + } - if p.SessionData().Database != "" && p.SessionData().Database != string(tn.CatalogName) { - // Postgres does not allow cross-database references in these - // functions, so we don't either. - return nil, pgerror.Newf(pgcode.FeatureNotSupported, - "cross-database references are not implemented: %s", tn) - } - _, table, err := p.Descriptors().GetImmutableTableByName( - ctx, p.txn, tn, tree.ObjectLookupFlags{ - CommonLookupFlags: tree.CommonLookupFlags{ - Required: true, + if p.SessionData().Database != "" && p.SessionData().Database != string(tn.CatalogName) { + // Postgres does not allow cross-database references in these + // functions, so we don't either. 
+ return nil, pgerror.Newf(pgcode.FeatureNotSupported, + "cross-database references are not implemented: %s", tn) + } + _, table, err = p.Descriptors().GetImmutableTableByName( + ctx, p.txn, tn, tree.ObjectLookupFlags{ + CommonLookupFlags: tree.CommonLookupFlags{ + Required: true, + }, }, - }, - ) + ) + } else { + table, err = p.Descriptors().GetImmutableTableByID( + ctx, p.txn, descpb.ID(*specifier.TableOID), + tree.ObjectLookupFlags{ + CommonLookupFlags: tree.CommonLookupFlags{ + Required: true, + }, + }, + ) + } if err != nil { return nil, err } @@ -381,27 +414,7 @@ func (p *planner) ResolveDescriptorForPrivilegeSpecifier( } return table, nil } - if specifier.TableOID == nil { - return nil, errors.AssertionFailedf("no table name or oid found") - } - table, err := p.Descriptors().GetImmutableTableByID( - ctx, p.txn, descpb.ID(*specifier.TableOID), - tree.ObjectLookupFlags{ - CommonLookupFlags: tree.CommonLookupFlags{ - Required: true, - }, - }, - ) - if err != nil { - return nil, err - } - if err := validateColumnForHasPrivilegeSpecifier( - table, - specifier, - ); err != nil { - return nil, err - } - return table, nil + return nil, errors.AssertionFailedf("invalid HasPrivilegeSpecifier") } func validateColumnForHasPrivilegeSpecifier( diff --git a/pkg/sql/roleoption/option_string.go b/pkg/sql/roleoption/option_string.go index ff118f2b5be8..d200c1a59818 100644 --- a/pkg/sql/roleoption/option_string.go +++ b/pkg/sql/roleoption/option_string.go @@ -29,11 +29,13 @@ func _() { _ = x[MODIFYCLUSTERSETTING-19] _ = x[NOMODIFYCLUSTERSETTING-20] _ = x[DEFAULTSETTINGS-21] + _ = x[VIEWACTIVITYREDACTED-22] + _ = x[NOVIEWACTIVITYREDACTED-23] } -const _Option_name = "CREATEROLENOCREATEROLEPASSWORDLOGINNOLOGINVALIDUNTILCONTROLJOBNOCONTROLJOBCONTROLCHANGEFEEDNOCONTROLCHANGEFEEDCREATEDBNOCREATEDBCREATELOGINNOCREATELOGINVIEWACTIVITYNOVIEWACTIVITYCANCELQUERYNOCANCELQUERYMODIFYCLUSTERSETTINGNOMODIFYCLUSTERSETTINGDEFAULTSETTINGS" +const _Option_name = "CREATEROLENOCREATEROLEPASSWORDLOGINNOLOGINVALIDUNTILCONTROLJOBNOCONTROLJOBCONTROLCHANGEFEEDNOCONTROLCHANGEFEEDCREATEDBNOCREATEDBCREATELOGINNOCREATELOGINVIEWACTIVITYNOVIEWACTIVITYCANCELQUERYNOCANCELQUERYMODIFYCLUSTERSETTINGNOMODIFYCLUSTERSETTINGDEFAULTSETTINGSVIEWACTIVITYREDACTEDNOVIEWACTIVITYREDACTED" -var _Option_index = [...]uint16{0, 10, 22, 30, 35, 42, 52, 62, 74, 91, 110, 118, 128, 139, 152, 164, 178, 189, 202, 222, 244, 259} +var _Option_index = [...]uint16{0, 10, 22, 30, 35, 42, 52, 62, 74, 91, 110, 118, 128, 139, 152, 164, 178, 189, 202, 222, 244, 259, 279, 301} func (i Option) String() string { i -= 1 diff --git a/pkg/sql/roleoption/role_option.go b/pkg/sql/roleoption/role_option.go index 13f43d5cb50c..6d39d40fc3b2 100644 --- a/pkg/sql/roleoption/role_option.go +++ b/pkg/sql/roleoption/role_option.go @@ -56,6 +56,8 @@ const ( MODIFYCLUSTERSETTING NOMODIFYCLUSTERSETTING DEFAULTSETTINGS + VIEWACTIVITYREDACTED + NOVIEWACTIVITYREDACTED ) // toSQLStmts is a map of Kind -> SQL statement string for applying the @@ -80,6 +82,8 @@ var toSQLStmts = map[Option]string{ NOCANCELQUERY: `DELETE FROM system.role_options WHERE username = $1 AND option = 'CANCELQUERY'`, MODIFYCLUSTERSETTING: `UPSERT INTO system.role_options (username, option) VALUES ($1, 'MODIFYCLUSTERSETTING')`, NOMODIFYCLUSTERSETTING: `DELETE FROM system.role_options WHERE username = $1 AND option = 'MODIFYCLUSTERSETTING'`, + VIEWACTIVITYREDACTED: `UPSERT INTO system.role_options (username, option) VALUES ($1, 'VIEWACTIVITYREDACTED')`, + NOVIEWACTIVITYREDACTED: `DELETE FROM system.role_options WHERE 
username = $1 AND option = 'VIEWACTIVITYREDACTED'`, } // Mask returns the bitmask for a given role option. @@ -110,6 +114,8 @@ var ByName = map[string]Option{ "MODIFYCLUSTERSETTING": MODIFYCLUSTERSETTING, "NOMODIFYCLUSTERSETTING": NOMODIFYCLUSTERSETTING, "DEFAULTSETTINGS": DEFAULTSETTINGS, + "VIEWACTIVITYREDACTED": VIEWACTIVITYREDACTED, + "NOVIEWACTIVITYREDACTED": NOVIEWACTIVITYREDACTED, } // ToOption takes a string and returns the corresponding Option. @@ -213,7 +219,9 @@ func (rol List) CheckRoleOptionConflicts() error { (roleOptionBits&CANCELQUERY.Mask() != 0 && roleOptionBits&NOCANCELQUERY.Mask() != 0) || (roleOptionBits&MODIFYCLUSTERSETTING.Mask() != 0 && - roleOptionBits&NOMODIFYCLUSTERSETTING.Mask() != 0) { + roleOptionBits&NOMODIFYCLUSTERSETTING.Mask() != 0) || + (roleOptionBits&VIEWACTIVITYREDACTED.Mask() != 0 && + roleOptionBits&NOVIEWACTIVITYREDACTED.Mask() != 0) { return pgerror.Newf(pgcode.Syntax, "conflicting role options") } return nil } diff --git a/pkg/sql/row/BUILD.bazel b/pkg/sql/row/BUILD.bazel index f0fe5c4c4058..cb7108e3ced5 100644 --- a/pkg/sql/row/BUILD.bazel +++ b/pkg/sql/row/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "helper.go", "inserter.go", "kv_batch_fetcher.go", + "kv_batch_streamer.go", "kv_fetcher.go", "locking.go", "metrics.go", @@ -23,26 +24,31 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/row", visibility = ["//visibility:public"], deps = [ + "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvclient/kvstreamer", "//pkg/kv/kvserver", "//pkg/kv/kvserver/concurrency/lock", "//pkg/roachpb:with-mocks", "//pkg/settings", + "//pkg/settings/cluster", "//pkg/sql/catalog", "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/catalogkv", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/schemaexpr", "//pkg/sql/catalog/seqexpr", - "//pkg/sql/execinfrapb", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/rowenc/rowencpb", + "//pkg/sql/rowenc/valueside", "//pkg/sql/rowinfra", "//pkg/sql/scrub", "//pkg/sql/sem/builtins", @@ -97,7 +103,6 @@ go_test( "//pkg/sql/catalog/tabledesc", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", - "//pkg/sql/rowenc", "//pkg/sql/rowinfra", "//pkg/sql/sem/tree", "//pkg/storage", diff --git a/pkg/sql/row/errors.go b/pkg/sql/row/errors.go index 708d5f6f8ca4..0047d139a905 100644 --- a/pkg/sql/row/errors.go +++ b/pkg/sql/row/errors.go @@ -178,7 +178,7 @@ func DecodeRowInfo( if err != nil { return nil, nil, nil, err } - indexID, _, err := rowenc.DecodeIndexKeyPrefix(codec, tableDesc, key) + indexID, _, err := rowenc.DecodeIndexKeyPrefix(codec, tableDesc.GetID(), key) if err != nil { return nil, nil, nil, err } @@ -242,9 +242,8 @@ func DecodeRowInfo( false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ - &rowenc.DatumAlloc{}, + 0, /* lockTimeout */ + &tree.DatumAlloc{}, nil, /* memMonitor */ tableArgs, ); err != nil { diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index e3114a33ba69..b615c55d2f87 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -21,10 +21,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -123,18 +125,6 @@ type tableInfo struct { // changefeeds use this by providing raw kvs with tombstones unfiltered via // `StartScanFrom`. rowIsDeleted bool - - // hasLast indicates whether there was a previously scanned k/v. - hasLast bool - // lastDatums is a buffer for the current key. It is only present when - // doing a physical check in order to verify round-trip encoding. - // It is required because Fetcher.kv is overwritten before NextRow - // returns. - lastKV roachpb.KeyValue - // lastDatums is a buffer for the previously scanned k/v datums. It is - // only present when doing a physical check in order to verify - // ordering. - lastDatums tree.Datums } // FetcherTableArgs are the arguments passed to Fetcher.Init @@ -151,17 +141,10 @@ type FetcherTableArgs struct { // InitCols initializes the columns in FetcherTableArgs. func (fta *FetcherTableArgs) InitCols( - desc catalog.TableDescriptor, - scanVisibility execinfrapb.ScanVisibility, - withSystemColumns bool, - invertedColumn catalog.Column, + desc catalog.TableDescriptor, withSystemColumns bool, invertedColumn catalog.Column, ) { cols := make([]catalog.Column, 0, len(desc.AllColumns())) - if scanVisibility == execinfrapb.ScanVisibility_PUBLIC_AND_NOT_PUBLIC { - cols = append(cols, desc.ReadableColumns()...) - } else { - cols = append(cols, desc.PublicColumns()...) - } + cols = append(cols, desc.ReadableColumns()...) if invertedColumn != nil { for i, col := range cols { if col.GetID() == invertedColumn.GetID() { @@ -247,16 +230,12 @@ type Fetcher struct { keyRemainingBytes []byte kvEnd bool - // isCheck indicates whether or not we are running checks for k/v - // correctness. It is set only during SCRUB commands. - isCheck bool - // IgnoreUnexpectedNulls allows Fetcher to return null values for non-nullable // columns and is only used for decoding for error messages or debugging. IgnoreUnexpectedNulls bool // Buffered allocation of decoded datums. - alloc *rowenc.DatumAlloc + alloc *tree.DatumAlloc // Memory monitor and memory account for the bytes fetched by this fetcher. mon *mon.BytesMonitor @@ -294,8 +273,7 @@ func (rf *Fetcher) Init( lockStrength descpb.ScanLockingStrength, lockWaitPolicy descpb.ScanLockingWaitPolicy, lockTimeout time.Duration, - isCheck bool, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, memMonitor *mon.BytesMonitor, tableArgs FetcherTableArgs, ) error { @@ -305,7 +283,6 @@ func (rf *Fetcher) Init( rf.lockWaitPolicy = lockWaitPolicy rf.lockTimeout = lockTimeout rf.alloc = alloc - rf.isCheck = isCheck if memMonitor != nil { rf.mon = mon.NewMonitorInheritWithLimit("fetcher-mem", 0 /* limit */, memMonitor) @@ -345,10 +322,10 @@ func (rf *Fetcher) Init( // Set up any system column metadata, if this column is a system column. 
switch colinfo.GetSystemColumnKindFromColumnID(col.GetID()) { - case descpb.SystemColumnKind_MVCCTIMESTAMP: + case catpb.SystemColumnKind_MVCCTIMESTAMP: table.timestampOutputIdx = idx rf.mvccDecodeStrategy = MVCCDecodingRequired - case descpb.SystemColumnKind_TABLEOID: + case catpb.SystemColumnKind_TABLEOID: table.oidOutputIdx = idx table.tableOid = tree.NewDOid(tree.DInt(tableArgs.Desc.GetID())) } @@ -356,31 +333,36 @@ func (rf *Fetcher) Init( } table.knownPrefixLength = len( - rowenc.MakeIndexKeyPrefix(codec, table.desc, table.index.GetID()), + rowenc.MakeIndexKeyPrefix(codec, table.desc.GetID(), table.index.GetID()), ) - var indexColumnIDs []descpb.ColumnID - indexColumnIDs, table.indexColumnDirs = catalog.FullIndexColumnIDs(table.index) + table.indexColumnDirs = table.desc.IndexFullColumnDirections(table.index) + fullColumns := table.desc.IndexFullColumns(table.index) table.neededValueColsByIdx = tableArgs.ValNeededForCol.Copy() neededIndexCols := 0 - nIndexCols := len(indexColumnIDs) + nIndexCols := len(fullColumns) if cap(table.indexColIdx) >= nIndexCols { table.indexColIdx = table.indexColIdx[:nIndexCols] } else { table.indexColIdx = make([]int, nIndexCols) } - for i, id := range indexColumnIDs { + for i, col := range fullColumns { + if col == nil { + table.indexColIdx[i] = -1 + continue + } + id := col.GetID() colIdx, ok := table.colIdxMap.Get(id) if ok { table.indexColIdx[i] = colIdx - if table.neededCols.Contains(int(id)) { + if table.neededCols.Contains(int(col.GetID())) { neededIndexCols++ table.neededValueColsByIdx.Remove(colIdx) } } else { table.indexColIdx[i] = -1 - if table.neededCols.Contains(int(id)) { + if table.neededCols.Contains(int(col.GetID())) { return errors.AssertionFailedf("needed column %d not in colIdxMap", id) } } @@ -419,7 +401,7 @@ func (rf *Fetcher) Init( } // Prepare our index key vals slice. - table.keyValTypes, err = colinfo.GetColumnTypes(table.desc, indexColumnIDs, table.keyValTypes) + table.keyValTypes, err = getColumnTypes(fullColumns, table.keyValTypes) if err != nil { return err } @@ -435,8 +417,9 @@ func (rf *Fetcher) Init( // Primary indexes only contain ascendingly-encoded // values. If this ever changes, we'll probably have to // figure out the directions here too. - table.extraTypes, err = colinfo.GetColumnTypes(table.desc, table.index.IndexDesc().KeySuffixColumnIDs, table.extraTypes) - nExtraColumns := table.index.NumKeySuffixColumns() + keySuffixCols := table.desc.IndexKeySuffixColumns(table.index) + table.extraTypes, err = getColumnTypes(keySuffixCols, table.extraTypes) + nExtraColumns := len(keySuffixCols) if cap(table.extraVals) >= nExtraColumns { table.extraVals = table.extraVals[:nExtraColumns] } else { @@ -451,6 +434,24 @@ func (rf *Fetcher) Init( return err } +func getColumnTypes(columns []catalog.Column, outTypes []*types.T) ([]*types.T, error) { + if cap(outTypes) < len(columns) { + outTypes = make([]*types.T, len(columns)) + } else { + outTypes = outTypes[:len(columns)] + } + for i, col := range columns { + if col == nil { + return nil, fmt.Errorf("column does not exist") + } + if !col.Public() { + return nil, fmt.Errorf("column %q (%d) is not public", col.GetName(), col.GetID()) + } + outTypes[i] = col.GetType() + } + return outTypes, nil +} + // GetTable returns the table that this Fetcher was initialized with. 
func (rf *Fetcher) GetTable() catalog.Descriptor { return rf.table.desc @@ -735,7 +736,7 @@ func (rf *Fetcher) NextKey(ctx context.Context) (rowDone bool, _ error) { for i := 0; i < rf.table.index.NumKeySuffixColumns(); i++ { var err error // Slice off an extra encoded column from rf.keyRemainingBytes. - rf.keyRemainingBytes, err = rowenc.SkipTableKey(rf.keyRemainingBytes) + rf.keyRemainingBytes, err = keyside.Skip(rf.keyRemainingBytes) if err != nil { return false, err } @@ -1020,7 +1021,7 @@ func (rf *Fetcher) processValueSingle( // although that would require changing UnmarshalColumnValue to operate // on bytes, and for Encode/DecodeTableValue to operate on marshaled // single values. - value, err := rowenc.UnmarshalColumnValue(rf.alloc, typ, kv.Value) + value, err := valueside.UnmarshalLegacy(rf.alloc, typ, kv.Value) if err != nil { return "", "", err } @@ -1142,9 +1143,6 @@ func (rf *Fetcher) NextRow( log.VEventf(ctx, 2, "fetched: %s -> %s", prettyKey, prettyVal) } - if rf.isCheck { - rf.table.lastKV = rf.kv - } rowDone, err := rf.NextKey(ctx) if err != nil { return nil, nil, nil, err @@ -1201,194 +1199,6 @@ func (rf *Fetcher) RowIsDeleted() bool { return rf.table.rowIsDeleted } -// NextRowWithErrors calls NextRow to fetch the next row and also run -// additional additional logic for physical checks. The Datums should -// not be modified and are only valid until the next call. When there -// are no more rows, the Datums is nil. The checks executed include: -// - k/v data round-trips, i.e. it decodes and re-encodes to the same -// value. -// - There is no extra unexpected or incorrect data encoded in the k/v -// pair. -// - Decoded keys follow the same ordering as their encoding. -func (rf *Fetcher) NextRowWithErrors(ctx context.Context) (rowenc.EncDatumRow, error) { - row, table, index, err := rf.NextRow(ctx) - if row == nil { - return nil, nil - } else if err != nil { - // If this is not already a wrapped error, we will consider it to be - // a generic physical error. - // FIXME(joey): This may not be needed if we capture all the errors - // encountered. This is a TBD when this change is polished. - if !scrub.IsScrubError(err) { - err = scrub.WrapError(scrub.PhysicalError, err) - } - return row, err - } - - // Decode the row in-place. The following check datum encoding - // functions require that the table.row datums are decoded. - for i := range row { - if row[i].IsUnset() { - rf.table.decodedRow[i] = tree.DNull - continue - } - if err := row[i].EnsureDecoded(rf.table.cols[i].GetType(), rf.alloc); err != nil { - return nil, err - } - rf.table.decodedRow[i] = row[i].Datum - } - - if index.GetID() == table.GetPrimaryIndexID() { - err = rf.checkPrimaryIndexDatumEncodings(ctx) - } else { - err = rf.checkSecondaryIndexDatumEncodings(ctx) - } - if err != nil { - return row, err - } - - err = rf.checkKeyOrdering(ctx) - - return row, err -} - -// checkPrimaryIndexDatumEncodings will run a round-trip encoding check -// on all values in the buffered row. This check is specific to primary -// index datums. 
-func (rf *Fetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error { - table := &rf.table - scratch := make([]byte, 1024) - colIDToColumn := make(map[descpb.ColumnID]catalog.Column) - for _, col := range table.desc.PublicColumns() { - colIDToColumn[col.GetID()] = col - } - - rh := rowHelper{TableDesc: table.desc, Indexes: table.desc.PublicNonPrimaryIndexes()} - - return table.desc.ForeachFamily(func(family *descpb.ColumnFamilyDescriptor) error { - var lastColID descpb.ColumnID - familyID := family.ID - familySortedColumnIDs, ok := rh.sortedColumnFamily(familyID) - if !ok { - return errors.AssertionFailedf("invalid family sorted column id map for family %d", familyID) - } - - for _, colID := range familySortedColumnIDs { - rowVal := table.row[table.colIdxMap.GetDefault(colID)] - if rowVal.IsNull() { - // Column is not present. - continue - } - - if skip, err := rh.skipColumnNotInPrimaryIndexValue(colID, rowVal.Datum); err != nil { - return errors.NewAssertionErrorWithWrappedErrf(err, "unable to determine skip") - } else if skip { - continue - } - - col := colIDToColumn[colID] - if col == nil { - return errors.AssertionFailedf("column mapping not found for column %d", colID) - } - - if lastColID > col.GetID() { - return errors.AssertionFailedf("cannot write column id %d after %d", col.GetID(), lastColID) - } - colIDDiff := col.GetID() - lastColID - lastColID = col.GetID() - - if result, err := rowenc.EncodeTableValue([]byte(nil), colIDDiff, rowVal.Datum, - scratch); err != nil { - return errors.NewAssertionErrorWithWrappedErrf(err, "could not re-encode column %s, value was %#v", - col.GetName(), rowVal.Datum) - } else if !rowVal.BytesEqual(result) { - return scrub.WrapError(scrub.IndexValueDecodingError, errors.Errorf( - "value failed to round-trip encode. Column=%s colIDDiff=%d Key=%s expected %#v, got: %#v", - col.GetName(), colIDDiff, rf.kv.Key, rowVal.EncodedString(), result)) - } - } - return nil - }) -} - -// checkSecondaryIndexDatumEncodings will run a round-trip encoding -// check on all values in the buffered row. This check is specific to -// secondary index datums. -func (rf *Fetcher) checkSecondaryIndexDatumEncodings(ctx context.Context) error { - table := &rf.table - colToEncDatum := make(map[descpb.ColumnID]rowenc.EncDatum, len(table.row)) - values := make(tree.Datums, len(table.row)) - for i, col := range table.cols { - colToEncDatum[col.GetID()] = table.row[i] - values[i] = table.row[i].Datum - } - - // The below code makes incorrect checks (#45256). - indexEntries, err := rowenc.EncodeSecondaryIndex( - rf.codec, table.desc, table.index, table.colIdxMap, values, false /* includeEmpty */) - if err != nil { - return err - } - - for _, indexEntry := range indexEntries { - // We ignore the first 4 bytes of the values. These bytes are a - // checksum which are not set by EncodeSecondaryIndex. - if !indexEntry.Key.Equal(rf.table.lastKV.Key) { - return scrub.WrapError(scrub.IndexKeyDecodingError, errors.Errorf( - "secondary index key failed to round-trip encode. expected %#v, got: %#v", - rf.table.lastKV.Key, indexEntry.Key)) - } else if !indexEntry.Value.EqualTagAndData(table.lastKV.Value) { - return scrub.WrapError(scrub.IndexValueDecodingError, errors.Errorf( - "secondary index value failed to round-trip encode. expected %#v, got: %#v", - rf.table.lastKV.Value, indexEntry.Value)) - } - } - return nil -} - -// checkKeyOrdering verifies that the datums decoded for the current key -// have the same ordering as the encoded key. 
-func (rf *Fetcher) checkKeyOrdering(ctx context.Context) error { - defer func() { - rf.table.lastDatums = append(tree.Datums(nil), rf.table.decodedRow...) - }() - - if !rf.table.hasLast { - rf.table.hasLast = true - return nil - } - - evalCtx := tree.EvalContext{} - // Iterate through columns in order, comparing each value to the value in the - // previous row in that column. When the first column with a differing value - // is found, compare the values to ensure the ordering matches the column - // ordering. - for i := 0; i < rf.table.index.NumKeyColumns(); i++ { - id := rf.table.index.GetKeyColumnID(i) - idx := rf.table.colIdxMap.GetDefault(id) - result := rf.table.decodedRow[idx].Compare(&evalCtx, rf.table.lastDatums[idx]) - expectedDirection := rf.table.index.GetKeyColumnDirection(i) - if rf.reverse && expectedDirection == descpb.IndexDescriptor_ASC { - expectedDirection = descpb.IndexDescriptor_DESC - } else if rf.reverse && expectedDirection == descpb.IndexDescriptor_DESC { - expectedDirection = descpb.IndexDescriptor_ASC - } - - if result != 0 { - if expectedDirection == descpb.IndexDescriptor_ASC && result < 0 || - expectedDirection == descpb.IndexDescriptor_DESC && result > 0 { - return scrub.WrapError(scrub.IndexKeyDecodingError, - errors.Errorf("key ordering did not match datum ordering. IndexDescriptor=%s", - expectedDirection)) - } - // After the first column with a differing value is found, the remaining - // columns are skipped (see #32874). - break - } - } - return nil -} - func (rf *Fetcher) finalizeRow() error { table := &rf.table @@ -1422,15 +1232,10 @@ func (rf *Fetcher) finalizeRow() error { indexColValues = append(indexColValues, "?") } } - err := errors.AssertionFailedf( + return errors.AssertionFailedf( "Non-nullable column \"%s:%s\" with no value! 
Index scanned was %q with the index key columns (%s) and the values (%s)", table.desc.GetName(), table.cols[i].GetName(), table.index.GetName(), strings.Join(table.index.IndexDesc().KeyColumnNames, ","), strings.Join(indexColValues, ",")) - - if rf.isCheck { - return scrub.WrapError(scrub.UnexpectedNullValueError, err) - } - return err } table.row[i] = rowenc.EncDatum{ Datum: tree.DNull, diff --git a/pkg/sql/row/fetcher_mvcc_test.go b/pkg/sql/row/fetcher_mvcc_test.go index 5852fcd9e1b6..8eb0c32d1959 100644 --- a/pkg/sql/row/fetcher_mvcc_test.go +++ b/pkg/sql/row/fetcher_mvcc_test.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -104,9 +103,8 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { false, /* reverse */ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - true, /* isCheck */ - &rowenc.DatumAlloc{}, + 0, /* lockTimeout */ + &tree.DatumAlloc{}, nil, /* memMonitor */ table, ); err != nil { diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index 09b5890e7b21..5b51e0acf09e 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -57,7 +56,7 @@ func makeFetcherArgs(entry initFetcherArgs) FetcherTableArgs { } func initFetcher( - entry initFetcherArgs, reverseScan bool, alloc *rowenc.DatumAlloc, memMon *mon.BytesMonitor, + entry initFetcherArgs, reverseScan bool, alloc *tree.DatumAlloc, memMon *mon.BytesMonitor, ) (fetcher *Fetcher, err error) { fetcher = &Fetcher{} @@ -70,8 +69,7 @@ func initFetcher( reverseScan, descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ alloc, memMon, fetcherArgs, @@ -131,7 +129,7 @@ func TestNextRowSingle(t *testing.T) { ) } - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} // We try to read rows from each table. for tableName, table := range tables { @@ -250,7 +248,7 @@ func TestNextRowBatchLimiting(t *testing.T) { ) } - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} // We try to read rows from each table. for tableName, table := range tables { @@ -364,7 +362,7 @@ func TestRowFetcherMemoryLimits(t *testing.T) { valNeededForCol: valNeededForCol, } - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} settings := cluster.MakeTestingClusterSettings() @@ -428,7 +426,7 @@ INDEX(c) ), ) - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} tableDesc := catalogkv.TestingGetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) @@ -604,7 +602,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { table.nRows += nNulls } - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} // We try to read rows from each index. 
for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { @@ -744,7 +742,7 @@ func TestRowFetcherReset(t *testing.T) { indexIdx: 0, valNeededForCol: valNeededForCol, } - da := rowenc.DatumAlloc{} + da := tree.DatumAlloc{} fetcher, err := initFetcher(args, false, &da, nil /*memMon*/) if err != nil { t.Fatal(err) @@ -767,8 +765,7 @@ func TestRowFetcherReset(t *testing.T) { false, /*reverse*/ descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, - 0, /* lockTimeout */ - false, /* isCheck */ + 0, /* lockTimeout */ &da, nil, /* memMonitor */ fetcherArgs, diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go index a80091997709..fcb9c6aa9386 100644 --- a/pkg/sql/row/helper.go +++ b/pkg/sql/row/helper.go @@ -173,8 +173,9 @@ func (rh *rowHelper) encodePrimaryIndex( colIDtoRowIndex catalog.TableColMap, values []tree.Datum, ) (primaryIndexKey []byte, err error) { if rh.primaryIndexKeyPrefix == nil { - rh.primaryIndexKeyPrefix = rowenc.MakeIndexKeyPrefix(rh.Codec, rh.TableDesc, - rh.TableDesc.GetPrimaryIndexID()) + rh.primaryIndexKeyPrefix = rowenc.MakeIndexKeyPrefix( + rh.Codec, rh.TableDesc.GetID(), rh.TableDesc.GetPrimaryIndexID(), + ) } primaryIndexKey, _, err = rowenc.EncodeIndexKey( rh.TableDesc, rh.TableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, rh.primaryIndexKeyPrefix) diff --git a/pkg/sql/row/inserter.go b/pkg/sql/row/inserter.go index 9f8c09dfe6ef..632184196980 100644 --- a/pkg/sql/row/inserter.go +++ b/pkg/sql/row/inserter.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -48,7 +48,7 @@ func MakeInserter( codec keys.SQLCodec, tableDesc catalog.TableDescriptor, insertCols []catalog.Column, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, sv *settings.Values, internal bool, metrics *Metrics, @@ -143,10 +143,13 @@ func (ri *Inserter) InsertRow( // Encode the values to the expected column type. This needs to // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. + // + // TODO(radu): the legacy marshaling is used only in rare cases; this is + // wasteful. for i, val := range values { // Make sure the value can be written to the column before proceeding. var err error - if ri.marshaled[i], err = rowenc.MarshalColumnValue(ri.InsertCols[i], val); err != nil { + if ri.marshaled[i], err = valueside.MarshalLegacy(ri.InsertCols[i].GetType(), val); err != nil { return err } } diff --git a/pkg/sql/row/kv_batch_streamer.go b/pkg/sql/row/kv_batch_streamer.go new file mode 100644 index 000000000000..550db6a32803 --- /dev/null +++ b/pkg/sql/row/kv_batch_streamer.go @@ -0,0 +1,215 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package row + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/clusterversion" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" +) + +// CanUseStreamer returns whether the kvstreamer.Streamer API should be used. +func CanUseStreamer(ctx context.Context, settings *cluster.Settings) bool { + // TODO(yuzefovich): remove the version gate in 22.2 cycle. + return settings.Version.IsActive(ctx, clusterversion.TargetBytesAvoidExcess) && + useStreamerEnabled.Get(&settings.SV) +} + +// useStreamerEnabled determines whether the Streamer API should be used. +// TODO(yuzefovich): remove this in 22.2. +var useStreamerEnabled = settings.RegisterBoolSetting( + settings.TenantWritable, + "sql.distsql.use_streamer.enabled", + "determines whether the usage of the Streamer API is allowed. "+ + "Enabling this will increase the speed of lookup/index joins "+ + "while adhering to memory limits.", + true, +) + +// TxnKVStreamer handles retrieval of key/values. +type TxnKVStreamer struct { + streamer *kvstreamer.Streamer + spans roachpb.Spans + + // numOutstandingRequests tracks the number of requests that haven't been + // fully responded to yet. + numOutstandingRequests int + + // getResponseScratch is reused to return the result of Get requests. + getResponseScratch [1]roachpb.KeyValue + + results []kvstreamer.Result + lastResultState struct { + kvstreamer.Result + // numEmitted tracks the number of times this result has been fully + // emitted. + numEmitted int + // Used only for ScanResponses. + remainingBatches [][]byte + } +} + +var _ KVBatchFetcher = &TxnKVStreamer{} + +// NewTxnKVStreamer creates a new TxnKVStreamer. +func NewTxnKVStreamer( + ctx context.Context, + streamer *kvstreamer.Streamer, + spans roachpb.Spans, + lockStrength descpb.ScanLockingStrength, +) (*TxnKVStreamer, error) { + if log.ExpensiveLogEnabled(ctx, 2) { + log.VEventf(ctx, 2, "Scan %s", spans) + } + keyLocking := getKeyLockingStrength(lockStrength) + reqs := spansToRequests(spans, false /* reverse */, keyLocking) + if err := streamer.Enqueue(ctx, reqs, nil /* enqueueKeys */); err != nil { + return nil, err + } + return &TxnKVStreamer{ + streamer: streamer, + spans: spans, + numOutstandingRequests: len(spans), + }, nil +} + +// proceedWithLastResult processes the result which must be already set on the +// lastResultState and emits the first part of the response (the only part for +// GetResponses). +func (f *TxnKVStreamer) proceedWithLastResult( + ctx context.Context, +) (skip bool, kvs []roachpb.KeyValue, batchResp []byte, err error) { + result := f.lastResultState.Result + if get := result.GetResp; get != nil { + if get.IntentValue != nil { + return false, nil, nil, errors.AssertionFailedf( + "unexpectedly got an IntentValue back from a SQL GetRequest %v", *get.IntentValue, + ) + } + if get.Value == nil { + // Nothing found in this particular response, so we skip it. 
+ f.releaseLastResult(ctx) + return true, nil, nil, nil + } + pos := result.EnqueueKeysSatisfied[f.lastResultState.numEmitted] + origSpan := f.spans[pos] + f.lastResultState.numEmitted++ + f.numOutstandingRequests-- + f.getResponseScratch[0] = roachpb.KeyValue{Key: origSpan.Key, Value: *get.Value} + return false, f.getResponseScratch[:], nil, nil + } + scan := result.ScanResp + if len(scan.BatchResponses) > 0 { + batchResp, f.lastResultState.remainingBatches = scan.BatchResponses[0], scan.BatchResponses[1:] + } + if len(f.lastResultState.remainingBatches) == 0 { + f.processedScanResponse() + } + return false, scan.Rows, batchResp, nil +} + +// processedScanResponse updates the lastResultState before emitting the last +// part of the ScanResponse. This method should be called for each request that +// the ScanResponse satisfies. +func (f *TxnKVStreamer) processedScanResponse() { + f.lastResultState.numEmitted++ + if f.lastResultState.ScanResp.Complete { + f.numOutstandingRequests-- + } +} + +func (f *TxnKVStreamer) releaseLastResult(ctx context.Context) { + f.lastResultState.Release(ctx) + f.lastResultState.Result = kvstreamer.Result{} +} + +// nextBatch returns the next batch of key/value pairs. If there are none +// available, a fetch is initiated. When there are no more keys, ok is false. +func (f *TxnKVStreamer) nextBatch( + ctx context.Context, +) (ok bool, kvs []roachpb.KeyValue, batchResp []byte, err error) { + if f.numOutstandingRequests == 0 { + // All requests have already been responded to. + f.releaseLastResult(ctx) + return false, nil, nil, nil + } + + // Check whether there are more batches in the current ScanResponse. + if len(f.lastResultState.remainingBatches) > 0 { + batchResp, f.lastResultState.remainingBatches = f.lastResultState.remainingBatches[0], f.lastResultState.remainingBatches[1:] + if len(f.lastResultState.remainingBatches) == 0 { + f.processedScanResponse() + } + return true, nil, batchResp, nil + } + + // Check whether the current result satisfies multiple requests. + if f.lastResultState.numEmitted < len(f.lastResultState.EnqueueKeysSatisfied) { + // Note that we should never get an error here since we're processing + // the same result again. + _, kvs, batchResp, err = f.proceedWithLastResult(ctx) + return true, kvs, batchResp, err + } + + // Release the current result. + if f.lastResultState.numEmitted == len(f.lastResultState.EnqueueKeysSatisfied) && f.lastResultState.numEmitted > 0 { + f.releaseLastResult(ctx) + } + + // Process the next result we have already received from the streamer. + for len(f.results) > 0 { + // Peel off the next result and set it into lastResultState. + f.lastResultState.Result = f.results[0] + f.lastResultState.numEmitted = 0 + f.lastResultState.remainingBatches = nil + // Lose the reference to that result and advance the results slice for + // the next iteration. + f.results[0] = kvstreamer.Result{} + f.results = f.results[1:] + var skip bool + skip, kvs, batchResp, err = f.proceedWithLastResult(ctx) + if err != nil { + return false, nil, nil, err + } + if skip { + continue + } + return true, kvs, batchResp, nil + } + + // Get more results from the streamer. This call will block until some + // results are available or we're done. + // + // The memory accounting for the returned results has already been performed + // by the streamer against its own budget, so we don't have to concern + // ourselves with the memory accounting here. 
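(Editorial aside, not part of the patch.) To show how the pieces introduced in this new file are meant to fit together, here is a minimal sketch of a caller that gates on `CanUseStreamer` and then wraps a `TxnKVStreamer` in a `KVFetcher` via `NewKVStreamingFetcher` (added to `kv_fetcher.go` below). The helper name `maybeMakeStreamingFetcher` is hypothetical, and constructing the `*kvstreamer.Streamer` itself is assumed to happen elsewhere:

```
package example // illustrative sketch, not part of the patch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/row"
)

// maybeMakeStreamingFetcher gates on CanUseStreamer and, when the streamer
// path is allowed, enqueues the spans and wraps the resulting TxnKVStreamer
// in a KVFetcher. Returning (nil, nil) signals that the caller should fall
// back to the regular txn-based KV fetcher.
func maybeMakeStreamingFetcher(
	ctx context.Context,
	st *cluster.Settings,
	streamer *kvstreamer.Streamer,
	spans roachpb.Spans,
) (*row.KVFetcher, error) {
	if !row.CanUseStreamer(ctx, st) {
		return nil, nil
	}
	s, err := row.NewTxnKVStreamer(ctx, streamer, spans, descpb.ScanLockingStrength_FOR_NONE)
	if err != nil {
		return nil, err
	}
	return row.NewKVStreamingFetcher(s), nil
}
```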
+ f.results, err = f.streamer.GetResults(ctx) + if len(f.results) == 0 || err != nil { + return false, nil, nil, err + } + return f.nextBatch(ctx) +} + +// close releases the resources of this TxnKVStreamer. +func (f *TxnKVStreamer) close(ctx context.Context) { + f.lastResultState.Release(ctx) + for _, r := range f.results { + r.Release(ctx) + } + *f = TxnKVStreamer{} +} diff --git a/pkg/sql/row/kv_fetcher.go b/pkg/sql/row/kv_fetcher.go index 25eac1ef211a..7c0ed1b164f5 100644 --- a/pkg/sql/row/kv_fetcher.go +++ b/pkg/sql/row/kv_fetcher.go @@ -108,11 +108,18 @@ func NewKVFetcher( return newKVFetcher(&kvBatchFetcher), err } +// NewKVStreamingFetcher returns a new KVFetcher that utilizes the provided +// TxnKVStreamer to perform KV reads. +func NewKVStreamingFetcher(streamer *TxnKVStreamer) *KVFetcher { + return &KVFetcher{ + KVBatchFetcher: streamer, + } +} + func newKVFetcher(batchFetcher KVBatchFetcher) *KVFetcher { - ret := &KVFetcher{ + return &KVFetcher{ KVBatchFetcher: batchFetcher, } - return ret } // GetBytesRead returns the number of bytes read by this fetcher. It is safe for diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index 3fc9fd8c670e..d6eaacb1e4da 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -351,7 +350,7 @@ func NewDatumRowConverter( evalCtx.Codec, tableDesc, cols, - &rowenc.DatumAlloc{}, + &tree.DatumAlloc{}, &evalCtx.Settings.SV, evalCtx.SessionData().Internal, metrics, diff --git a/pkg/sql/row/updater.go b/pkg/sql/row/updater.go index a52a3b3e4c3b..1cf8651fe5f7 100644 --- a/pkg/sql/row/updater.go +++ b/pkg/sql/row/updater.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/unique" @@ -83,7 +84,7 @@ func MakeUpdater( updateCols []catalog.Column, requestedCols []catalog.Column, updateType rowUpdaterType, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, sv *settings.Values, internal bool, metrics *Metrics, @@ -243,8 +244,11 @@ func (ru *Updater) UpdateRow( // Check that the new value types match the column types. This needs to // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. + // + // TODO(radu): the legacy marshaling is used only in rare cases; this is + // wasteful. 
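(Editorial aside, not part of the patch.) Both the inserter and the updater now call the relocated legacy value encoder, and `processValueSingle` above decodes with its counterpart. A small round-trip sketch may help; `legacyRoundTrip` is hypothetical, and the `MarshalLegacy`/`UnmarshalLegacy` signatures are inferred from the call sites in this patch:

```
package example // illustrative sketch, not part of the patch

import (
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
)

// legacyRoundTrip marshals a datum into a roachpb.Value using the legacy
// single-value encoding and decodes it back, as the inserter/updater and
// processValueSingle do in this patch.
func legacyRoundTrip(typ *types.T, d tree.Datum) (tree.Datum, error) {
	v, err := valueside.MarshalLegacy(typ, d)
	if err != nil {
		return nil, err
	}
	var alloc tree.DatumAlloc
	return valueside.UnmarshalLegacy(&alloc, typ, v)
}
```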
for i, val := range updateValues { - if ru.marshaled[i], err = rowenc.MarshalColumnValue(ru.UpdateCols[i], val); err != nil { + if ru.marshaled[i], err = valueside.MarshalLegacy(ru.UpdateCols[i].GetType(), val); err != nil { return nil, err } } diff --git a/pkg/sql/row/writer.go b/pkg/sql/row/writer.go index 55bf07e79990..796230d950c1 100644 --- a/pkg/sql/row/writer.go +++ b/pkg/sql/row/writer.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/errors" @@ -187,10 +187,10 @@ func prepareInsertOrUpdateBatch( if lastColID > col.GetID() { return nil, errors.AssertionFailedf("cannot write column id %d after %d", col.GetID(), lastColID) } - colIDDiff := col.GetID() - lastColID + colIDDelta := valueside.MakeColumnIDDelta(lastColID, col.GetID()) lastColID = col.GetID() var err error - rawValueBuf, err = rowenc.EncodeTableValue(rawValueBuf, colIDDiff, values[idx], nil) + rawValueBuf, err = valueside.Encode(rawValueBuf, colIDDelta, values[idx], nil) if err != nil { return nil, err } diff --git a/pkg/sql/row_source_to_plan_node.go b/pkg/sql/row_source_to_plan_node.go index b93a7d6763a1..ccf6c724ca9b 100644 --- a/pkg/sql/row_source_to_plan_node.go +++ b/pkg/sql/row_source_to_plan_node.go @@ -36,7 +36,7 @@ type rowSourceToPlanNode struct { // Temporary variables row rowenc.EncDatumRow - da rowenc.DatumAlloc + da tree.DatumAlloc datumRow tree.Datums } diff --git a/pkg/sql/rowcontainer/disk_row_container.go b/pkg/sql/rowcontainer/disk_row_container.go index 81674f2343d8..86f5cc31622c 100644 --- a/pkg/sql/rowcontainer/disk_row_container.go +++ b/pkg/sql/rowcontainer/disk_row_container.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -81,7 +82,7 @@ type DiskRowContainer struct { diskMonitor *mon.BytesMonitor engine diskmap.Factory - datumAlloc *rowenc.DatumAlloc + datumAlloc *tree.DatumAlloc } var _ SortableRowContainer = &DiskRowContainer{} @@ -109,7 +110,7 @@ func MakeDiskRowContainer( scratchEncRow: make(rowenc.EncDatumRow, len(types)), diskMonitor: diskMonitor, engine: e, - datumAlloc: &rowenc.DatumAlloc{}, + datumAlloc: &tree.DatumAlloc{}, } d.bufferedRows = d.diskMap.NewBatchWriter() diff --git a/pkg/sql/rowcontainer/disk_row_container_test.go b/pkg/sql/rowcontainer/disk_row_container_test.go index e2cfbfab288e..9bcf9c0a1fb8 100644 --- a/pkg/sql/rowcontainer/disk_row_container_test.go +++ b/pkg/sql/rowcontainer/disk_row_container_test.go @@ -43,7 +43,7 @@ func compareRows( lTypes []*types.T, l, r rowenc.EncDatumRow, e *tree.EvalContext, - d *rowenc.DatumAlloc, + d *tree.DatumAlloc, ordering colinfo.ColumnOrdering, ) (int, error) { for _, orderInfo := range ordering { @@ -343,7 +343,7 @@ func makeUniqueRows( ) (int, rowenc.EncDatumRows) { rows := randgen.RandEncDatumRowsOfTypes(rng, numRows, types) // It is possible there was some duplication, so remove duplicates. 
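(Editorial aside, not part of the patch.) To make the new `ColumnIDDelta` plumbing in `prepareInsertOrUpdateBatch` concrete, here is a minimal sketch of encoding several column values into one buffer. The helper name `encodeFamilyValues` is hypothetical; it assumes `colIDs` is sorted ascending, matching the "cannot write column id %d after %d" assertion above:

```
package example // illustrative sketch, not part of the patch

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// encodeFamilyValues appends the value encodings of datums to a single buffer.
// Each datum is tagged with the delta from the previously written column ID,
// mirroring prepareInsertOrUpdateBatch above.
func encodeFamilyValues(colIDs []descpb.ColumnID, datums tree.Datums) ([]byte, error) {
	var buf []byte
	var lastColID descpb.ColumnID
	for i, d := range datums {
		delta := valueside.MakeColumnIDDelta(lastColID, colIDs[i])
		lastColID = colIDs[i]
		var err error
		buf, err = valueside.Encode(buf, delta, d, nil /* scratch */)
		if err != nil {
			return nil, err
		}
	}
	return buf, nil
}
```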
- var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc sort.Slice(rows, func(i, j int) bool { cmp, err := rows[i].Compare(types, &alloc, ordering, evalCtx, rows[j]) require.NoError(t, err) @@ -408,7 +408,7 @@ func TestDiskRowContainerFinalIterator(t *testing.T) { ctx := context.Background() st := cluster.MakeTestingClusterSettings() - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} evalCtx := tree.MakeTestingEvalContext(st) tempEngine, _, err := storage.NewTempEngine(ctx, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec) if err != nil { diff --git a/pkg/sql/rowcontainer/hash_row_container.go b/pkg/sql/rowcontainer/hash_row_container.go index 0067e63734a9..a66978dad369 100644 --- a/pkg/sql/rowcontainer/hash_row_container.go +++ b/pkg/sql/rowcontainer/hash_row_container.go @@ -97,7 +97,7 @@ type columnEncoder struct { scratch []byte // types for the "key" columns (equality columns) keyTypes []*types.T - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc encodeNull bool } @@ -114,7 +114,7 @@ func (e *columnEncoder) init(typs []*types.T, keyCols columns, encodeNull bool) // If the row contains any NULLs and encodeNull is false, hasNull is true and // no encoding is returned. If encodeNull is true, hasNull is never set. func encodeColumnsOfRow( - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, appendTo []byte, row rowenc.EncDatumRow, cols columns, diff --git a/pkg/sql/rowcontainer/hash_row_container_test.go b/pkg/sql/rowcontainer/hash_row_container_test.go index d4b33f88036c..33be3af65f2c 100644 --- a/pkg/sql/rowcontainer/hash_row_container_test.go +++ b/pkg/sql/rowcontainer/hash_row_container_test.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/storage" @@ -216,7 +215,7 @@ func TestHashDiskBackedRowContainer(t *testing.T) { t.Fatal(err) } if cmp, err := compareRows( - types.OneIntCol, row, rows[counter], &evalCtx, &rowenc.DatumAlloc{}, ordering, + types.OneIntCol, row, rows[counter], &evalCtx, &tree.DatumAlloc{}, ordering, ); err != nil { t.Fatal(err) } else if cmp != 0 { @@ -241,7 +240,7 @@ func TestHashDiskBackedRowContainer(t *testing.T) { t.Fatal(err) } if cmp, err := compareRows( - types.OneIntCol, row, rows[counter], &evalCtx, &rowenc.DatumAlloc{}, ordering, + types.OneIntCol, row, rows[counter], &evalCtx, &tree.DatumAlloc{}, ordering, ); err != nil { t.Fatal(err) } else if cmp != 0 { @@ -291,7 +290,7 @@ func TestHashDiskBackedRowContainer(t *testing.T) { t.Fatal(err) } if cmp, err := compareRows( - types.OneIntCol, row, rows[counter], &evalCtx, &rowenc.DatumAlloc{}, ordering, + types.OneIntCol, row, rows[counter], &evalCtx, &tree.DatumAlloc{}, ordering, ); err != nil { t.Fatal(err) } else if cmp != 0 { diff --git a/pkg/sql/rowcontainer/numbered_row_container.go b/pkg/sql/rowcontainer/numbered_row_container.go index 4ece45c401b4..871f49bc3360 100644 --- a/pkg/sql/rowcontainer/numbered_row_container.go +++ b/pkg/sql/rowcontainer/numbered_row_container.go @@ -294,7 +294,7 @@ type numberedDiskRowIterator struct { // EncDatumRow. The top element has the highest nextAccess and is the // best candidate to evict. 
cacheHeap cacheMaxNextAccessHeap - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc hitCount int diff --git a/pkg/sql/rowcontainer/row_container.go b/pkg/sql/rowcontainer/row_container.go index 87c215ae1c26..b709113d4f4b 100644 --- a/pkg/sql/rowcontainer/row_container.go +++ b/pkg/sql/rowcontainer/row_container.go @@ -151,7 +151,7 @@ type MemRowContainer struct { evalCtx *tree.EvalContext - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc } var _ heap.Interface = &MemRowContainer{} @@ -374,7 +374,7 @@ type DiskBackedRowContainer struct { // encodings keeps around the DatumEncoding equivalents of the encoding // directions in ordering to avoid conversions in hot paths. encodings []descpb.DatumEncoding - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc scratchKey []byte spilled bool @@ -639,7 +639,7 @@ type DiskBackedIndexedRowContainer struct { scratchEncRow rowenc.EncDatumRow storedTypes []*types.T - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc idx uint64 // the index of the next row to be added into the container diff --git a/pkg/sql/rowcontainer/row_container_test.go b/pkg/sql/rowcontainer/row_container_test.go index 278350da12c0..abaceb9b661f 100644 --- a/pkg/sql/rowcontainer/row_container_test.go +++ b/pkg/sql/rowcontainer/row_container_test.go @@ -58,7 +58,7 @@ func verifyRows( return err } if cmp, err := compareRows( - types.OneIntCol, row, expectedRows[0], evalCtx, &rowenc.DatumAlloc{}, ordering, + types.OneIntCol, row, expectedRows[0], evalCtx, &tree.DatumAlloc{}, ordering, ); err != nil { return err } else if cmp != 0 { @@ -433,7 +433,7 @@ func verifyOrdering( types []*types.T, ordering colinfo.ColumnOrdering, ) error { - var datumAlloc rowenc.DatumAlloc + var datumAlloc tree.DatumAlloc var rowAlloc rowenc.EncDatumRowAlloc var prevRow rowenc.EncDatumRow i := src.NewIterator(ctx) diff --git a/pkg/sql/rowenc/BUILD.bazel b/pkg/sql/rowenc/BUILD.bazel index 070aa83aa438..f2500205198e 100644 --- a/pkg/sql/rowenc/BUILD.bazel +++ b/pkg/sql/rowenc/BUILD.bazel @@ -3,8 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "rowenc", srcs = [ - "column_type_encoding.go", - "datum_alloc.go", "encoded_datum.go", "index_encoding.go", "partition.go", @@ -13,7 +11,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/rowenc", visibility = ["//visibility:public"], deps = [ - "//pkg/geo", "//pkg/geo/geoindex", "//pkg/geo/geopb", "//pkg/keys", @@ -22,29 +19,21 @@ go_library( "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/inverted", - "//pkg/sql/lex", "//pkg/sql/parser", + "//pkg/sql/rowenc/keyside", "//pkg/sql/rowenc/rowencpb", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", "//pkg/sql/types", "//pkg/util", - "//pkg/util/bitarray", - "//pkg/util/duration", "//pkg/util/encoding", - "//pkg/util/errorutil/unimplemented", - "//pkg/util/ipaddr", "//pkg/util/json", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/protoutil", - "//pkg/util/timetz", - "//pkg/util/timeutil/pgdate", "//pkg/util/unique", - "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", "@com_github_cockroachdb_errors//:errors", - "@com_github_lib_pq//oid", ], ) @@ -52,15 +41,13 @@ go_test( name = "rowenc_test", size = "medium", srcs = [ - "column_type_encoding_test.go", "encoded_datum_test.go", - "helpers_test.go", "index_encoding_test.go", "main_test.go", "roundtrip_format_test.go", ], - embed = [":rowenc"], deps = [ + 
":rowenc", "//pkg/keys", "//pkg/kv", "//pkg/roachpb:with-mocks", @@ -74,22 +61,17 @@ go_test( "//pkg/sql/catalog/tabledesc", "//pkg/sql/inverted", "//pkg/sql/randgen", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/types", - "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/util/encoding", "//pkg/util/json", "//pkg/util/leaktest", "//pkg/util/randutil", - "//pkg/util/timeofday", - "//pkg/util/timeutil", - "//pkg/util/timeutil/pgdate", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", - "@com_github_leanovate_gopter//:gopter", - "@com_github_leanovate_gopter//prop", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/sql/rowenc/column_type_encoding.go b/pkg/sql/rowenc/column_type_encoding.go deleted file mode 100644 index 073ddbf53d52..000000000000 --- a/pkg/sql/rowenc/column_type_encoding.go +++ /dev/null @@ -1,1471 +0,0 @@ -// Copyright 2018 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package rowenc - -import ( - "time" - - "github.com/cockroachdb/apd/v2" - "github.com/cockroachdb/cockroach/pkg/geo" - "github.com/cockroachdb/cockroach/pkg/geo/geopb" - "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/lex" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/bitarray" - "github.com/cockroachdb/cockroach/pkg/util/duration" - "github.com/cockroachdb/cockroach/pkg/util/encoding" - "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" - "github.com/cockroachdb/cockroach/pkg/util/ipaddr" - "github.com/cockroachdb/cockroach/pkg/util/json" - "github.com/cockroachdb/cockroach/pkg/util/timetz" - "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" - "github.com/cockroachdb/cockroach/pkg/util/uuid" - "github.com/cockroachdb/errors" - "github.com/lib/pq/oid" -) - -// This file contains facilities to encode values of specific SQL -// types to either index keys or to store in the value part of column -// families. - -// EncodeTableKey encodes `val` into `b` and returns the new buffer. -// This is suitable to generate index/lookup keys in KV. -// -// The encoded value is guaranteed to be lexicographically sortable, -// but not guaranteed to be round-trippable during decoding: some -// values like decimals or collated strings have composite encoding -// where part of their value lies in the value part of the key/value -// pair. -// -// See also: docs/tech-notes/encoding.md, EncodeTableValue(). 
-func EncodeTableKey(b []byte, val tree.Datum, dir encoding.Direction) ([]byte, error) { - if (dir != encoding.Ascending) && (dir != encoding.Descending) { - return nil, errors.Errorf("invalid direction: %d", dir) - } - - if val == tree.DNull { - if dir == encoding.Ascending { - return encoding.EncodeNullAscending(b), nil - } - return encoding.EncodeNullDescending(b), nil - } - - switch t := tree.UnwrapDatum(nil, val).(type) { - case *tree.DBool: - var x int64 - if *t { - x = 1 - } else { - x = 0 - } - if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, x), nil - } - return encoding.EncodeVarintDescending(b, x), nil - case *tree.DInt: - if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, int64(*t)), nil - } - return encoding.EncodeVarintDescending(b, int64(*t)), nil - case *tree.DFloat: - if dir == encoding.Ascending { - return encoding.EncodeFloatAscending(b, float64(*t)), nil - } - return encoding.EncodeFloatDescending(b, float64(*t)), nil - case *tree.DDecimal: - if dir == encoding.Ascending { - return encoding.EncodeDecimalAscending(b, &t.Decimal), nil - } - return encoding.EncodeDecimalDescending(b, &t.Decimal), nil - case *tree.DString: - if dir == encoding.Ascending { - return encoding.EncodeStringAscending(b, string(*t)), nil - } - return encoding.EncodeStringDescending(b, string(*t)), nil - case *tree.DBytes: - if dir == encoding.Ascending { - return encoding.EncodeStringAscending(b, string(*t)), nil - } - return encoding.EncodeStringDescending(b, string(*t)), nil - case *tree.DVoid: - return encoding.EncodeVoidAscendingOrDescending(b), nil - case *tree.DBox2D: - if dir == encoding.Ascending { - return encoding.EncodeBox2DAscending(b, t.CartesianBoundingBox.BoundingBox) - } - return encoding.EncodeBox2DDescending(b, t.CartesianBoundingBox.BoundingBox) - case *tree.DGeography: - so := t.Geography.SpatialObjectRef() - if dir == encoding.Ascending { - return encoding.EncodeGeoAscending(b, t.Geography.SpaceCurveIndex(), so) - } - return encoding.EncodeGeoDescending(b, t.Geography.SpaceCurveIndex(), so) - case *tree.DGeometry: - so := t.Geometry.SpatialObjectRef() - spaceCurveIndex, err := t.Geometry.SpaceCurveIndex() - if err != nil { - return nil, err - } - if dir == encoding.Ascending { - return encoding.EncodeGeoAscending(b, spaceCurveIndex, so) - } - return encoding.EncodeGeoDescending(b, spaceCurveIndex, so) - case *tree.DDate: - if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, t.UnixEpochDaysWithOrig()), nil - } - return encoding.EncodeVarintDescending(b, t.UnixEpochDaysWithOrig()), nil - case *tree.DTime: - if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, int64(*t)), nil - } - return encoding.EncodeVarintDescending(b, int64(*t)), nil - case *tree.DTimestamp: - if dir == encoding.Ascending { - return encoding.EncodeTimeAscending(b, t.Time), nil - } - return encoding.EncodeTimeDescending(b, t.Time), nil - case *tree.DTimestampTZ: - if dir == encoding.Ascending { - return encoding.EncodeTimeAscending(b, t.Time), nil - } - return encoding.EncodeTimeDescending(b, t.Time), nil - case *tree.DTimeTZ: - if dir == encoding.Ascending { - return encoding.EncodeTimeTZAscending(b, t.TimeTZ), nil - } - return encoding.EncodeTimeTZDescending(b, t.TimeTZ), nil - case *tree.DInterval: - if dir == encoding.Ascending { - return encoding.EncodeDurationAscending(b, t.Duration) - } - return encoding.EncodeDurationDescending(b, t.Duration) - case *tree.DUuid: - if dir == encoding.Ascending { - return 
encoding.EncodeBytesAscending(b, t.GetBytes()), nil - } - return encoding.EncodeBytesDescending(b, t.GetBytes()), nil - case *tree.DIPAddr: - data := t.ToBuffer(nil) - if dir == encoding.Ascending { - return encoding.EncodeBytesAscending(b, data), nil - } - return encoding.EncodeBytesDescending(b, data), nil - case *tree.DTuple: - for _, datum := range t.D { - var err error - b, err = EncodeTableKey(b, datum, dir) - if err != nil { - return nil, err - } - } - return b, nil - case *tree.DArray: - return encodeArrayKey(b, t, dir) - case *tree.DCollatedString: - if dir == encoding.Ascending { - return encoding.EncodeBytesAscending(b, t.Key), nil - } - return encoding.EncodeBytesDescending(b, t.Key), nil - case *tree.DBitArray: - if dir == encoding.Ascending { - return encoding.EncodeBitArrayAscending(b, t.BitArray), nil - } - return encoding.EncodeBitArrayDescending(b, t.BitArray), nil - case *tree.DOid: - if dir == encoding.Ascending { - return encoding.EncodeVarintAscending(b, int64(t.DInt)), nil - } - return encoding.EncodeVarintDescending(b, int64(t.DInt)), nil - case *tree.DEnum: - if dir == encoding.Ascending { - return encoding.EncodeBytesAscending(b, t.PhysicalRep), nil - } - return encoding.EncodeBytesDescending(b, t.PhysicalRep), nil - case *tree.DJSON: - return nil, unimplemented.NewWithIssue(35706, "unable to encode JSON as a table key") - } - return nil, errors.Errorf("unable to encode table key: %T", val) -} - -// SkipTableKey skips a value of type valType in key, returning the remainder -// of the key. -func SkipTableKey(key []byte) ([]byte, error) { - skipLen, err := encoding.PeekLength(key) - if err != nil { - return nil, err - } - return key[skipLen:], nil -} - -// DecodeTableKey decodes a value encoded by EncodeTableKey. -func DecodeTableKey( - a *DatumAlloc, valType *types.T, key []byte, dir encoding.Direction, -) (tree.Datum, []byte, error) { - if (dir != encoding.Ascending) && (dir != encoding.Descending) { - return nil, nil, errors.Errorf("invalid direction: %d", dir) - } - var isNull bool - if key, isNull = encoding.DecodeIfNull(key); isNull { - return tree.DNull, key, nil - } - var rkey []byte - var err error - - switch valType.Family() { - case types.ArrayFamily: - return decodeArrayKey(a, valType, key, dir) - case types.BitFamily: - var r bitarray.BitArray - if dir == encoding.Ascending { - rkey, r, err = encoding.DecodeBitArrayAscending(key) - } else { - rkey, r, err = encoding.DecodeBitArrayDescending(key) - } - return a.NewDBitArray(tree.DBitArray{BitArray: r}), rkey, err - case types.BoolFamily: - var i int64 - if dir == encoding.Ascending { - rkey, i, err = encoding.DecodeVarintAscending(key) - } else { - rkey, i, err = encoding.DecodeVarintDescending(key) - } - // No need to chunk allocate DBool as MakeDBool returns either - // tree.DBoolTrue or tree.DBoolFalse. 
- return tree.MakeDBool(tree.DBool(i != 0)), rkey, err - case types.IntFamily: - var i int64 - if dir == encoding.Ascending { - rkey, i, err = encoding.DecodeVarintAscending(key) - } else { - rkey, i, err = encoding.DecodeVarintDescending(key) - } - return a.NewDInt(tree.DInt(i)), rkey, err - case types.FloatFamily: - var f float64 - if dir == encoding.Ascending { - rkey, f, err = encoding.DecodeFloatAscending(key) - } else { - rkey, f, err = encoding.DecodeFloatDescending(key) - } - return a.NewDFloat(tree.DFloat(f)), rkey, err - case types.DecimalFamily: - var d apd.Decimal - if dir == encoding.Ascending { - rkey, d, err = encoding.DecodeDecimalAscending(key, nil) - } else { - rkey, d, err = encoding.DecodeDecimalDescending(key, nil) - } - dd := a.NewDDecimal(tree.DDecimal{Decimal: d}) - return dd, rkey, err - case types.StringFamily: - var r string - if dir == encoding.Ascending { - // Perform a deep copy so that r would never reference the key's - // memory which might keep the BatchResponse alive. - rkey, r, err = encoding.DecodeUnsafeStringAscendingDeepCopy(key, nil) - } else { - rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil) - } - if valType.Oid() == oid.T_name { - return a.NewDName(tree.DString(r)), rkey, err - } - return a.NewDString(tree.DString(r)), rkey, err - case types.CollatedStringFamily: - var r string - if dir == encoding.Ascending { - // Perform a deep copy so that r would never reference the key's - // memory which might keep the BatchResponse alive. - rkey, r, err = encoding.DecodeUnsafeStringAscendingDeepCopy(key, nil) - } else { - rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil) - } - if err != nil { - return nil, nil, err - } - d, err := tree.NewDCollatedString(r, valType.Locale(), &a.env) - return d, rkey, err - case types.JsonFamily: - // Don't attempt to decode the JSON value. Instead, just return the - // remaining bytes of the key. - jsonLen, err := encoding.PeekLength(key) - if err != nil { - return nil, nil, err - } - return tree.DNull, key[jsonLen:], nil - case types.BytesFamily: - var r []byte - if dir == encoding.Ascending { - // No need to perform the deep copy since converting to string below - // will do that for us. 
- rkey, r, err = encoding.DecodeBytesAscending(key, nil) - } else { - rkey, r, err = encoding.DecodeBytesDescending(key, nil) - } - return a.NewDBytes(tree.DBytes(r)), rkey, err - case types.VoidFamily: - rkey, err = encoding.DecodeVoidAscendingOrDescending(key) - return a.NewDVoid(), rkey, err - case types.Box2DFamily: - var r geopb.BoundingBox - if dir == encoding.Ascending { - rkey, r, err = encoding.DecodeBox2DAscending(key) - } else { - rkey, r, err = encoding.DecodeBox2DDescending(key) - } - return a.NewDBox2D(tree.DBox2D{ - CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: r}, - }), rkey, err - case types.GeographyFamily: - g := a.NewDGeographyEmpty() - so := g.Geography.SpatialObjectRef() - if dir == encoding.Ascending { - rkey, err = encoding.DecodeGeoAscending(key, so) - } else { - rkey, err = encoding.DecodeGeoDescending(key, so) - } - a.DoneInitNewDGeo(so) - return g, rkey, err - case types.GeometryFamily: - g := a.NewDGeometryEmpty() - so := g.Geometry.SpatialObjectRef() - if dir == encoding.Ascending { - rkey, err = encoding.DecodeGeoAscending(key, so) - } else { - rkey, err = encoding.DecodeGeoDescending(key, so) - } - a.DoneInitNewDGeo(so) - return g, rkey, err - case types.DateFamily: - var t int64 - if dir == encoding.Ascending { - rkey, t, err = encoding.DecodeVarintAscending(key) - } else { - rkey, t, err = encoding.DecodeVarintDescending(key) - } - return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(t))), rkey, err - case types.TimeFamily: - var t int64 - if dir == encoding.Ascending { - rkey, t, err = encoding.DecodeVarintAscending(key) - } else { - rkey, t, err = encoding.DecodeVarintDescending(key) - } - return a.NewDTime(tree.DTime(t)), rkey, err - case types.TimeTZFamily: - var t timetz.TimeTZ - if dir == encoding.Ascending { - rkey, t, err = encoding.DecodeTimeTZAscending(key) - } else { - rkey, t, err = encoding.DecodeTimeTZDescending(key) - } - return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: t}), rkey, err - case types.TimestampFamily: - var t time.Time - if dir == encoding.Ascending { - rkey, t, err = encoding.DecodeTimeAscending(key) - } else { - rkey, t, err = encoding.DecodeTimeDescending(key) - } - return a.NewDTimestamp(tree.DTimestamp{Time: t}), rkey, err - case types.TimestampTZFamily: - var t time.Time - if dir == encoding.Ascending { - rkey, t, err = encoding.DecodeTimeAscending(key) - } else { - rkey, t, err = encoding.DecodeTimeDescending(key) - } - return a.NewDTimestampTZ(tree.DTimestampTZ{Time: t}), rkey, err - case types.IntervalFamily: - var d duration.Duration - if dir == encoding.Ascending { - rkey, d, err = encoding.DecodeDurationAscending(key) - } else { - rkey, d, err = encoding.DecodeDurationDescending(key) - } - return a.NewDInterval(tree.DInterval{Duration: d}), rkey, err - case types.UuidFamily: - var r []byte - if dir == encoding.Ascending { - // No need to perform the deep copy since converting to UUID below - // will do that for us. - rkey, r, err = encoding.DecodeBytesAscending(key, nil) - } else { - rkey, r, err = encoding.DecodeBytesDescending(key, nil) - } - if err != nil { - return nil, nil, err - } - u, err := uuid.FromBytes(r) - return a.NewDUuid(tree.DUuid{UUID: u}), rkey, err - case types.INetFamily: - var r []byte - if dir == encoding.Ascending { - // No need to perform the deep copy since converting to IPAddr below - // will do that for us. 
- rkey, r, err = encoding.DecodeBytesAscending(key, nil) - } else { - rkey, r, err = encoding.DecodeBytesDescending(key, nil) - } - if err != nil { - return nil, nil, err - } - var ipAddr ipaddr.IPAddr - _, err := ipAddr.FromBuffer(r) - return a.NewDIPAddr(tree.DIPAddr{IPAddr: ipAddr}), rkey, err - case types.OidFamily: - var i int64 - if dir == encoding.Ascending { - rkey, i, err = encoding.DecodeVarintAscending(key) - } else { - rkey, i, err = encoding.DecodeVarintDescending(key) - } - return a.NewDOid(tree.MakeDOid(tree.DInt(i))), rkey, err - case types.EnumFamily: - var r []byte - if dir == encoding.Ascending { - // No need to perform the deep copy since we only need r for a brief - // period of time. - rkey, r, err = encoding.DecodeBytesAscending(key, nil) - } else { - rkey, r, err = encoding.DecodeBytesDescending(key, nil) - } - if err != nil { - return nil, nil, err - } - phys, log, err := tree.GetEnumComponentsFromPhysicalRep(valType, r) - if err != nil { - return nil, nil, err - } - return a.NewDEnum(tree.DEnum{EnumTyp: valType, PhysicalRep: phys, LogicalRep: log}), rkey, nil - default: - return nil, nil, errors.Errorf("unable to decode table key: %s", valType) - } -} - -// EncodeTableValue encodes `val` into `appendTo` using DatumEncoding_VALUE -// and returns the new buffer. -// -// This is suitable for generating the value part of individual columns -// in a column family. -// -// The encoded value is guaranteed to round -// trip and decode exactly to its input, but is not guaranteed to be -// lexicographically sortable. -// -// See also: docs/tech-notes/encoding.md, EncodeTableKey(). -func EncodeTableValue( - appendTo []byte, colID descpb.ColumnID, val tree.Datum, scratch []byte, -) ([]byte, error) { - if val == tree.DNull { - return encoding.EncodeNullValue(appendTo, uint32(colID)), nil - } - switch t := tree.UnwrapDatum(nil, val).(type) { - case *tree.DBitArray: - return encoding.EncodeBitArrayValue(appendTo, uint32(colID), t.BitArray), nil - case *tree.DBool: - return encoding.EncodeBoolValue(appendTo, uint32(colID), bool(*t)), nil - case *tree.DInt: - return encoding.EncodeIntValue(appendTo, uint32(colID), int64(*t)), nil - case *tree.DFloat: - return encoding.EncodeFloatValue(appendTo, uint32(colID), float64(*t)), nil - case *tree.DDecimal: - return encoding.EncodeDecimalValue(appendTo, uint32(colID), &t.Decimal), nil - case *tree.DString: - return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(*t)), nil - case *tree.DBytes: - return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(*t)), nil - case *tree.DDate: - return encoding.EncodeIntValue(appendTo, uint32(colID), t.UnixEpochDaysWithOrig()), nil - case *tree.DBox2D: - return encoding.EncodeBox2DValue(appendTo, uint32(colID), t.CartesianBoundingBox.BoundingBox) - case *tree.DGeography: - return encoding.EncodeGeoValue(appendTo, uint32(colID), t.SpatialObjectRef()) - case *tree.DGeometry: - return encoding.EncodeGeoValue(appendTo, uint32(colID), t.SpatialObjectRef()) - case *tree.DTime: - return encoding.EncodeIntValue(appendTo, uint32(colID), int64(*t)), nil - case *tree.DTimeTZ: - return encoding.EncodeTimeTZValue(appendTo, uint32(colID), t.TimeTZ), nil - case *tree.DTimestamp: - return encoding.EncodeTimeValue(appendTo, uint32(colID), t.Time), nil - case *tree.DTimestampTZ: - return encoding.EncodeTimeValue(appendTo, uint32(colID), t.Time), nil - case *tree.DInterval: - return encoding.EncodeDurationValue(appendTo, uint32(colID), t.Duration), nil - case *tree.DUuid: - return 
encoding.EncodeUUIDValue(appendTo, uint32(colID), t.UUID), nil - case *tree.DIPAddr: - return encoding.EncodeIPAddrValue(appendTo, uint32(colID), t.IPAddr), nil - case *tree.DJSON: - encoded, err := json.EncodeJSON(scratch, t.JSON) - if err != nil { - return nil, err - } - return encoding.EncodeJSONValue(appendTo, uint32(colID), encoded), nil - case *tree.DArray: - a, err := encodeArray(t, scratch) - if err != nil { - return nil, err - } - return encoding.EncodeArrayValue(appendTo, uint32(colID), a), nil - case *tree.DTuple: - return encodeTuple(t, appendTo, uint32(colID), scratch) - case *tree.DCollatedString: - return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(t.Contents)), nil - case *tree.DOid: - return encoding.EncodeIntValue(appendTo, uint32(colID), int64(t.DInt)), nil - case *tree.DEnum: - return encoding.EncodeBytesValue(appendTo, uint32(colID), t.PhysicalRep), nil - case *tree.DVoid: - return encoding.EncodeVoidValue(appendTo, uint32(colID)), nil - default: - return nil, errors.Errorf("unable to encode table value: %T", t) - } -} - -// DecodeTableValue decodes a value encoded by EncodeTableValue. -func DecodeTableValue(a *DatumAlloc, valType *types.T, b []byte) (tree.Datum, []byte, error) { - _, dataOffset, _, typ, err := encoding.DecodeValueTag(b) - if err != nil { - return nil, b, err - } - // NULL is special because it is a valid value for any type. - if typ == encoding.Null { - return tree.DNull, b[dataOffset:], nil - } - // Bool is special because the value is stored in the value tag. - if valType.Family() != types.BoolFamily { - b = b[dataOffset:] - } - return DecodeUntaggedDatum(a, valType, b) -} - -// DecodeUntaggedDatum is used to decode a Datum whose type is known, -// and which doesn't have a value tag (either due to it having been -// consumed already or not having one in the first place). -// -// This is used to decode datums encoded using value encoding. -// -// If t is types.Bool, the value tag must be present, as its value is encoded in -// the tag directly. -func DecodeUntaggedDatum(a *DatumAlloc, t *types.T, buf []byte) (tree.Datum, []byte, error) { - switch t.Family() { - case types.IntFamily: - b, i, err := encoding.DecodeUntaggedIntValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDInt(tree.DInt(i)), b, nil - case types.StringFamily: - b, data, err := encoding.DecodeUntaggedBytesValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDString(tree.DString(data)), b, nil - case types.CollatedStringFamily: - b, data, err := encoding.DecodeUntaggedBytesValue(buf) - if err != nil { - return nil, b, err - } - d, err := tree.NewDCollatedString(string(data), t.Locale(), &a.env) - return d, b, err - case types.BitFamily: - b, data, err := encoding.DecodeUntaggedBitArrayValue(buf) - return a.NewDBitArray(tree.DBitArray{BitArray: data}), b, err - case types.BoolFamily: - // A boolean's value is encoded in its tag directly, so we don't have an - // "Untagged" version of this function. 
- b, data, err := encoding.DecodeBoolValue(buf) - if err != nil { - return nil, b, err - } - return tree.MakeDBool(tree.DBool(data)), b, nil - case types.FloatFamily: - b, data, err := encoding.DecodeUntaggedFloatValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDFloat(tree.DFloat(data)), b, nil - case types.DecimalFamily: - b, data, err := encoding.DecodeUntaggedDecimalValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDDecimal(tree.DDecimal{Decimal: data}), b, nil - case types.BytesFamily: - b, data, err := encoding.DecodeUntaggedBytesValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDBytes(tree.DBytes(data)), b, nil - case types.DateFamily: - b, data, err := encoding.DecodeUntaggedIntValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(data))), b, nil - case types.Box2DFamily: - b, data, err := encoding.DecodeUntaggedBox2DValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDBox2D(tree.DBox2D{ - CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: data}, - }), b, nil - case types.GeographyFamily: - g := a.NewDGeographyEmpty() - so := g.Geography.SpatialObjectRef() - b, err := encoding.DecodeUntaggedGeoValue(buf, so) - a.DoneInitNewDGeo(so) - if err != nil { - return nil, b, err - } - return g, b, nil - case types.GeometryFamily: - g := a.NewDGeometryEmpty() - so := g.Geometry.SpatialObjectRef() - b, err := encoding.DecodeUntaggedGeoValue(buf, so) - a.DoneInitNewDGeo(so) - if err != nil { - return nil, b, err - } - return g, b, nil - case types.TimeFamily: - b, data, err := encoding.DecodeUntaggedIntValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDTime(tree.DTime(data)), b, nil - case types.TimeTZFamily: - b, data, err := encoding.DecodeUntaggedTimeTZValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: data}), b, nil - case types.TimestampFamily: - b, data, err := encoding.DecodeUntaggedTimeValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDTimestamp(tree.DTimestamp{Time: data}), b, nil - case types.TimestampTZFamily: - b, data, err := encoding.DecodeUntaggedTimeValue(buf) - if err != nil { - return nil, b, err - } - return a.NewDTimestampTZ(tree.DTimestampTZ{Time: data}), b, nil - case types.IntervalFamily: - b, data, err := encoding.DecodeUntaggedDurationValue(buf) - return a.NewDInterval(tree.DInterval{Duration: data}), b, err - case types.UuidFamily: - b, data, err := encoding.DecodeUntaggedUUIDValue(buf) - return a.NewDUuid(tree.DUuid{UUID: data}), b, err - case types.INetFamily: - b, data, err := encoding.DecodeUntaggedIPAddrValue(buf) - return a.NewDIPAddr(tree.DIPAddr{IPAddr: data}), b, err - case types.JsonFamily: - b, data, err := encoding.DecodeUntaggedBytesValue(buf) - if err != nil { - return nil, b, err - } - // We copy the byte buffer here, because the JSON decoding is lazy, and we - // do not want to hang on to the backing byte buffer, which might be an - // entire KV batch. 
- cpy := make([]byte, len(data)) - copy(cpy, data) - j, err := json.FromEncoding(cpy) - if err != nil { - return nil, b, err - } - return a.NewDJSON(tree.DJSON{JSON: j}), b, nil - case types.OidFamily: - b, data, err := encoding.DecodeUntaggedIntValue(buf) - return a.NewDOid(tree.MakeDOid(tree.DInt(data))), b, err - case types.ArrayFamily: - return decodeArray(a, t.ArrayContents(), buf) - case types.TupleFamily: - return decodeTuple(a, t, buf) - case types.EnumFamily: - b, data, err := encoding.DecodeUntaggedBytesValue(buf) - if err != nil { - return nil, b, err - } - phys, log, err := tree.GetEnumComponentsFromPhysicalRep(t, data) - if err != nil { - return nil, nil, err - } - return a.NewDEnum(tree.DEnum{EnumTyp: t, PhysicalRep: phys, LogicalRep: log}), b, nil - case types.VoidFamily: - return a.NewDVoid(), buf, nil - default: - return nil, buf, errors.Errorf("couldn't decode type %s", t) - } -} - -// MarshalColumnValue produces the value encoding of the given datum, -// constrained by the given column type, into a roachpb.Value. -// -// This is used when when the table format does not use column -// families, such as pre-2.0 tables and some system tables. -// -// If val's type is incompatible with col, or if col's type is not yet -// implemented by this function, an error is returned. -func MarshalColumnValue(col catalog.Column, val tree.Datum) (roachpb.Value, error) { - return MarshalColumnTypeValue(col.GetName(), col.GetType(), val) -} - -// MarshalColumnTypeValue is called by MarshalColumnValue and in tests. -func MarshalColumnTypeValue( - colName string, colType *types.T, val tree.Datum, -) (roachpb.Value, error) { - var r roachpb.Value - - if val == tree.DNull { - return r, nil - } - - switch colType.Family() { - case types.BitFamily: - if v, ok := val.(*tree.DBitArray); ok { - r.SetBitArray(v.BitArray) - return r, nil - } - case types.BoolFamily: - if v, ok := val.(*tree.DBool); ok { - r.SetBool(bool(*v)) - return r, nil - } - case types.IntFamily: - if v, ok := tree.AsDInt(val); ok { - r.SetInt(int64(v)) - return r, nil - } - case types.FloatFamily: - if v, ok := val.(*tree.DFloat); ok { - r.SetFloat(float64(*v)) - return r, nil - } - case types.DecimalFamily: - if v, ok := val.(*tree.DDecimal); ok { - err := r.SetDecimal(&v.Decimal) - return r, err - } - case types.StringFamily: - if v, ok := tree.AsDString(val); ok { - r.SetString(string(v)) - return r, nil - } - case types.BytesFamily: - if v, ok := val.(*tree.DBytes); ok { - r.SetString(string(*v)) - return r, nil - } - case types.DateFamily: - if v, ok := val.(*tree.DDate); ok { - r.SetInt(v.UnixEpochDaysWithOrig()) - return r, nil - } - case types.Box2DFamily: - if v, ok := val.(*tree.DBox2D); ok { - r.SetBox2D(v.CartesianBoundingBox.BoundingBox) - return r, nil - } - case types.GeographyFamily: - if v, ok := val.(*tree.DGeography); ok { - err := r.SetGeo(v.SpatialObject()) - return r, err - } - case types.GeometryFamily: - if v, ok := val.(*tree.DGeometry); ok { - err := r.SetGeo(v.SpatialObject()) - return r, err - } - case types.TimeFamily: - if v, ok := val.(*tree.DTime); ok { - r.SetInt(int64(*v)) - return r, nil - } - case types.TimeTZFamily: - if v, ok := val.(*tree.DTimeTZ); ok { - r.SetTimeTZ(v.TimeTZ) - return r, nil - } - case types.TimestampFamily: - if v, ok := val.(*tree.DTimestamp); ok { - r.SetTime(v.Time) - return r, nil - } - case types.TimestampTZFamily: - if v, ok := val.(*tree.DTimestampTZ); ok { - r.SetTime(v.Time) - return r, nil - } - case types.IntervalFamily: - if v, ok := val.(*tree.DInterval); ok 
{ - err := r.SetDuration(v.Duration) - return r, err - } - case types.UuidFamily: - if v, ok := val.(*tree.DUuid); ok { - r.SetBytes(v.GetBytes()) - return r, nil - } - case types.INetFamily: - if v, ok := val.(*tree.DIPAddr); ok { - data := v.ToBuffer(nil) - r.SetBytes(data) - return r, nil - } - case types.JsonFamily: - if v, ok := val.(*tree.DJSON); ok { - data, err := json.EncodeJSON(nil, v.JSON) - if err != nil { - return r, err - } - r.SetBytes(data) - return r, nil - } - case types.ArrayFamily: - if v, ok := val.(*tree.DArray); ok { - if err := checkElementType(v.ParamTyp, colType.ArrayContents()); err != nil { - return r, err - } - b, err := encodeArray(v, nil) - if err != nil { - return r, err - } - r.SetBytes(b) - return r, nil - } - case types.CollatedStringFamily: - if v, ok := val.(*tree.DCollatedString); ok { - if lex.LocaleNamesAreEqual(v.Locale, colType.Locale()) { - r.SetString(v.Contents) - return r, nil - } - // We can't fail here with a locale mismatch, this is a sign - // that the proper validation has not been performed upstream in - // the mutation planning code. - return r, errors.AssertionFailedf( - "locale mismatch %q vs %q for column %q", - v.Locale, colType.Locale(), tree.ErrNameString(colName)) - } - case types.OidFamily: - if v, ok := val.(*tree.DOid); ok { - r.SetInt(int64(v.DInt)) - return r, nil - } - case types.EnumFamily: - if v, ok := val.(*tree.DEnum); ok { - r.SetBytes(v.PhysicalRep) - return r, nil - } - default: - return r, errors.AssertionFailedf("unsupported column type: %s", colType.Family()) - } - return r, errors.AssertionFailedf("mismatched type %q vs %q for column %q", - val.ResolvedType(), colType.Family(), tree.ErrNameString(colName)) -} - -// UnmarshalColumnValue is the counterpart to MarshalColumnValues. -// -// It decodes the value from a roachpb.Value using the type expected -// by the column. An error is returned if the value's type does not -// match the column's type. 
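Aside (illustration, not part of the diff): the tagged value encoding that EncodeTableValue/DecodeTableValue implement above prefixes each non-NULL value with a tag carrying the column ID and value type, and the payload follows in "untagged" form (booleans are the exception, since their value lives in the tag itself). A minimal roundtrip sketch using only the pkg/util/encoding helpers that already appear in this file; the column ID 7 is an arbitrary value chosen for the example:

```
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/encoding"
)

func main() {
	// Encode "column 7 = 42" using the tagged value encoding.
	buf := encoding.EncodeIntValue(nil, 7 /* colID */, 42)

	// Decoding mirrors DecodeTableValue: read the tag, then the payload.
	_, dataOffset, _, typ, err := encoding.DecodeValueTag(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(typ) // the decoded type tag

	rem, v, err := encoding.DecodeUntaggedIntValue(buf[dataOffset:])
	if err != nil {
		panic(err)
	}
	fmt.Println(v, len(rem)) // 42 0
}
```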
-func UnmarshalColumnValue(a *DatumAlloc, typ *types.T, value roachpb.Value) (tree.Datum, error) { - if value.RawBytes == nil { - return tree.DNull, nil - } - - switch typ.Family() { - case types.BitFamily: - d, err := value.GetBitArray() - if err != nil { - return nil, err - } - return a.NewDBitArray(tree.DBitArray{BitArray: d}), nil - case types.BoolFamily: - v, err := value.GetBool() - if err != nil { - return nil, err - } - return tree.MakeDBool(tree.DBool(v)), nil - case types.IntFamily: - v, err := value.GetInt() - if err != nil { - return nil, err - } - return a.NewDInt(tree.DInt(v)), nil - case types.FloatFamily: - v, err := value.GetFloat() - if err != nil { - return nil, err - } - return a.NewDFloat(tree.DFloat(v)), nil - case types.DecimalFamily: - v, err := value.GetDecimal() - if err != nil { - return nil, err - } - dd := a.NewDDecimal(tree.DDecimal{Decimal: v}) - return dd, nil - case types.StringFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - if typ.Oid() == oid.T_name { - return a.NewDName(tree.DString(v)), nil - } - return a.NewDString(tree.DString(v)), nil - case types.BytesFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - return a.NewDBytes(tree.DBytes(v)), nil - case types.DateFamily: - v, err := value.GetInt() - if err != nil { - return nil, err - } - return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(v))), nil - case types.Box2DFamily: - v, err := value.GetBox2D() - if err != nil { - return nil, err - } - return a.NewDBox2D(tree.DBox2D{ - CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: v}, - }), nil - case types.GeographyFamily: - v, err := value.GetGeo() - if err != nil { - return nil, err - } - return a.NewDGeography(tree.DGeography{Geography: geo.MakeGeographyUnsafe(v)}), nil - case types.GeometryFamily: - v, err := value.GetGeo() - if err != nil { - return nil, err - } - return a.NewDGeometry(tree.DGeometry{Geometry: geo.MakeGeometryUnsafe(v)}), nil - case types.TimeFamily: - v, err := value.GetInt() - if err != nil { - return nil, err - } - return a.NewDTime(tree.DTime(v)), nil - case types.TimeTZFamily: - v, err := value.GetTimeTZ() - if err != nil { - return nil, err - } - return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: v}), nil - case types.TimestampFamily: - v, err := value.GetTime() - if err != nil { - return nil, err - } - return a.NewDTimestamp(tree.DTimestamp{Time: v}), nil - case types.TimestampTZFamily: - v, err := value.GetTime() - if err != nil { - return nil, err - } - return a.NewDTimestampTZ(tree.DTimestampTZ{Time: v}), nil - case types.IntervalFamily: - d, err := value.GetDuration() - if err != nil { - return nil, err - } - return a.NewDInterval(tree.DInterval{Duration: d}), nil - case types.CollatedStringFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - return tree.NewDCollatedString(string(v), typ.Locale(), &a.env) - case types.UuidFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - u, err := uuid.FromBytes(v) - if err != nil { - return nil, err - } - return a.NewDUuid(tree.DUuid{UUID: u}), nil - case types.INetFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - var ipAddr ipaddr.IPAddr - _, err = ipAddr.FromBuffer(v) - if err != nil { - return nil, err - } - return a.NewDIPAddr(tree.DIPAddr{IPAddr: ipAddr}), nil - case types.OidFamily: - v, err := value.GetInt() - if err != nil { - return nil, err - } - return a.NewDOid(tree.MakeDOid(tree.DInt(v))), nil - case types.ArrayFamily: - v, 
err := value.GetBytes() - if err != nil { - return nil, err - } - datum, _, err := decodeArrayNoMarshalColumnValue(a, typ.ArrayContents(), v) - // TODO(yuzefovich): do we want to create a new object via DatumAlloc? - return datum, err - case types.JsonFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - _, jsonDatum, err := json.DecodeJSON(v) - if err != nil { - return nil, err - } - return tree.NewDJSON(jsonDatum), nil - case types.EnumFamily: - v, err := value.GetBytes() - if err != nil { - return nil, err - } - phys, log, err := tree.GetEnumComponentsFromPhysicalRep(typ, v) - if err != nil { - return nil, err - } - return a.NewDEnum(tree.DEnum{EnumTyp: typ, PhysicalRep: phys, LogicalRep: log}), nil - default: - return nil, errors.Errorf("unsupported column type: %s", typ.Family()) - } -} - -// encodeTuple produces the value encoding for a tuple. -func encodeTuple(t *tree.DTuple, appendTo []byte, colID uint32, scratch []byte) ([]byte, error) { - appendTo = encoding.EncodeValueTag(appendTo, colID, encoding.Tuple) - return encodeUntaggedTuple(t, appendTo, colID, scratch) -} - -// encodeUntaggedTuple produces the value encoding for a tuple without a value tag. -func encodeUntaggedTuple( - t *tree.DTuple, appendTo []byte, colID uint32, scratch []byte, -) ([]byte, error) { - appendTo = encoding.EncodeNonsortingUvarint(appendTo, uint64(len(t.D))) - - var err error - for _, dd := range t.D { - appendTo, err = EncodeTableValue(appendTo, descpb.ColumnID(encoding.NoColumnID), dd, scratch) - if err != nil { - return nil, err - } - } - return appendTo, nil -} - -// decodeTuple decodes a tuple from its value encoding. It is the -// counterpart of encodeTuple(). -func decodeTuple(a *DatumAlloc, tupTyp *types.T, b []byte) (tree.Datum, []byte, error) { - b, _, _, err := encoding.DecodeNonsortingUvarint(b) - if err != nil { - return nil, nil, err - } - - result := *(tree.NewDTuple(tupTyp)) - result.D = a.NewDatums(len(tupTyp.TupleContents())) - var datum tree.Datum - for i := range tupTyp.TupleContents() { - datum, b, err = DecodeTableValue(a, tupTyp.TupleContents()[i], b) - if err != nil { - return nil, b, err - } - result.D[i] = datum - } - return a.NewDTuple(result), b, nil -} - -// encodeArrayKey generates an ordered key encoding of an array. -// The encoding format for an array [a, b] is as follows: -// [arrayMarker, enc(a), enc(b), terminator]. -// The terminator is guaranteed to be less than all encoded values, -// so two arrays with the same prefix but different lengths will sort -// correctly. The key difference is that NULL values need to be encoded -// differently, because the standard NULL encoding conflicts with the -// terminator byte. This NULL value is chosen to be larger than the -// terminator but less than all existing encoded values. -func encodeArrayKey(b []byte, array *tree.DArray, dir encoding.Direction) ([]byte, error) { - var err error - b = encoding.EncodeArrayKeyMarker(b, dir) - for _, elem := range array.Array { - if elem == tree.DNull { - b = encoding.EncodeNullWithinArrayKey(b, dir) - } else { - b, err = EncodeTableKey(b, elem, dir) - if err != nil { - return nil, err - } - } - } - return encoding.EncodeArrayKeyTerminator(b, dir), nil -} - -// decodeArrayKey decodes an array key generated by encodeArrayKey. 
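Aside (illustration, not part of the diff): the array key format described above is what makes a prefix array sort before a longer array, because the terminator compares below every element encoding. A hedged, self-contained sketch using pkg/util/encoding directly; encoding the int elements with EncodeVarintAscending is an assumption standing in for the per-datum key encoding call:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/encoding"
)

// encodeIntArrayKey mimics encodeArrayKey for an ascending []int64, with
// EncodeVarintAscending standing in for the per-element datum encoding.
func encodeIntArrayKey(vals []int64) []byte {
	b := encoding.EncodeArrayKeyMarker(nil, encoding.Ascending)
	for _, v := range vals {
		b = encoding.EncodeVarintAscending(b, v)
	}
	return encoding.EncodeArrayKeyTerminator(b, encoding.Ascending)
}

func main() {
	oneTwo := encodeIntArrayKey([]int64{1, 2})
	one := encodeIntArrayKey([]int64{1})
	// The terminator sorts below all element encodings, so ARRAY[1] < ARRAY[1,2].
	fmt.Println(bytes.Compare(one, oneTwo) < 0) // true
}
```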
-func decodeArrayKey( - a *DatumAlloc, t *types.T, buf []byte, dir encoding.Direction, -) (tree.Datum, []byte, error) { - var err error - buf, err = encoding.ValidateAndConsumeArrayKeyMarker(buf, dir) - if err != nil { - return nil, nil, err - } - - result := tree.NewDArray(t.ArrayContents()) - - for { - if len(buf) == 0 { - return nil, nil, errors.AssertionFailedf("invalid array encoding (unterminated)") - } - if encoding.IsArrayKeyDone(buf, dir) { - buf = buf[1:] - break - } - var d tree.Datum - if encoding.IsNextByteArrayEncodedNull(buf, dir) { - d = tree.DNull - buf = buf[1:] - } else { - d, buf, err = DecodeTableKey(a, t.ArrayContents(), buf, dir) - if err != nil { - return nil, nil, err - } - } - if err := result.Append(d); err != nil { - return nil, nil, err - } - } - return result, buf, nil -} - -// encodeArray produces the value encoding for an array. -func encodeArray(d *tree.DArray, scratch []byte) ([]byte, error) { - if err := d.Validate(); err != nil { - return scratch, err - } - scratch = scratch[0:0] - elementType, err := DatumTypeToArrayElementEncodingType(d.ParamTyp) - - if err != nil { - return nil, err - } - header := arrayHeader{ - hasNulls: d.HasNulls, - // TODO(justin): support multiple dimensions. - numDimensions: 1, - elementType: elementType, - length: uint64(d.Len()), - // We don't encode the NULL bitmap in this function because we do it in lockstep with the - // main data. - } - scratch, err = encodeArrayHeader(header, scratch) - if err != nil { - return nil, err - } - nullBitmapStart := len(scratch) - if d.HasNulls { - for i := 0; i < numBytesInBitArray(d.Len()); i++ { - scratch = append(scratch, 0) - } - } - for i, e := range d.Array { - var err error - if d.HasNulls && e == tree.DNull { - setBit(scratch[nullBitmapStart:], i) - } else { - scratch, err = encodeArrayElement(scratch, e) - if err != nil { - return nil, err - } - } - } - return scratch, nil -} - -// decodeArray decodes the value encoding for an array. -func decodeArray(a *DatumAlloc, elementType *types.T, b []byte) (tree.Datum, []byte, error) { - b, _, _, err := encoding.DecodeNonsortingUvarint(b) - if err != nil { - return nil, b, err - } - return decodeArrayNoMarshalColumnValue(a, elementType, b) -} - -// decodeArrayNoMarshalColumnValue skips the step where the MarshalColumnValue -// is stripped from the bytes. This is required for single-column family arrays. -func decodeArrayNoMarshalColumnValue( - a *DatumAlloc, elementType *types.T, b []byte, -) (tree.Datum, []byte, error) { - header, b, err := decodeArrayHeader(b) - if err != nil { - return nil, b, err - } - result := tree.DArray{ - Array: make(tree.Datums, header.length), - ParamTyp: elementType, - } - var val tree.Datum - for i := uint64(0); i < header.length; i++ { - if header.isNull(i) { - result.Array[i] = tree.DNull - result.HasNulls = true - } else { - result.HasNonNulls = true - val, b, err = DecodeUntaggedDatum(a, elementType, b) - if err != nil { - return nil, b, err - } - result.Array[i] = val - } - } - return &result, b, nil -} - -// arrayHeader is a parameter passing struct between -// encodeArray/decodeArray and encodeArrayHeader/decodeArrayHeader. -// -// It describes the important properties of an array that are useful -// for an efficient value encoding. -type arrayHeader struct { - // hasNulls is set if the array contains any NULL values. - hasNulls bool - // numDimensions is the number of dimensions in the array. - numDimensions int - // elementType is the encoding type of the array elements. 
- elementType encoding.Type - // length is the total number of elements encoded. - length uint64 - // nullBitmap is a compact representation of which array indexes - // have NULL values. - nullBitmap []byte -} - -// isNull returns true iff the array element at the given index is -// NULL. -func (h arrayHeader) isNull(i uint64) bool { - return h.hasNulls && ((h.nullBitmap[i/8]>>(i%8))&1) == 1 -} - -// setBit sets the bit in the given bitmap at index idx to 1. It's used to -// construct the NULL bitmap within arrays. -func setBit(bitmap []byte, idx int) { - bitmap[idx/8] = bitmap[idx/8] | (1 << uint(idx%8)) -} - -// numBytesInBitArray returns the minimum number of bytes necessary to -// store the given number of bits. -func numBytesInBitArray(numBits int) int { - return (numBits + 7) / 8 -} - -// makeBitVec carves a bitmap (byte array intended to store bits) for -// the given number of bits out of its first argument. It returns the -// remainder of the first argument after the bitmap has been reserved -// into it. -func makeBitVec(src []byte, length int) (b, bitVec []byte) { - nullBitmapNumBytes := numBytesInBitArray(length) - return src[nullBitmapNumBytes:], src[:nullBitmapNumBytes] -} - -const hasNullFlag = 1 << 4 - -// encodeArrayHeader is used by encodeArray to encode the header -// at the beginning of the value encoding. -func encodeArrayHeader(h arrayHeader, buf []byte) ([]byte, error) { - // The header byte we append here is formatted as follows: - // * The low 4 bits encode the number of dimensions in the array. - // * The high 4 bits are flags, with the lowest representing whether the array - // contains NULLs, and the rest reserved. - headerByte := h.numDimensions - if h.hasNulls { - headerByte = headerByte | hasNullFlag - } - buf = append(buf, byte(headerByte)) - buf = encoding.EncodeValueTag(buf, encoding.NoColumnID, h.elementType) - buf = encoding.EncodeNonsortingUvarint(buf, h.length) - return buf, nil -} - -// decodeArrayHeader is used by decodeArray to decode the header at -// the beginning of the value encoding. -func decodeArrayHeader(b []byte) (arrayHeader, []byte, error) { - if len(b) < 2 { - return arrayHeader{}, b, errors.Errorf("buffer too small") - } - hasNulls := b[0]&hasNullFlag != 0 - b = b[1:] - _, dataOffset, _, encType, err := encoding.DecodeValueTag(b) - if err != nil { - return arrayHeader{}, b, err - } - b = b[dataOffset:] - b, _, length, err := encoding.DecodeNonsortingUvarint(b) - if err != nil { - return arrayHeader{}, b, err - } - nullBitmap := []byte(nil) - if hasNulls { - b, nullBitmap = makeBitVec(b, int(length)) - } - return arrayHeader{ - hasNulls: hasNulls, - // TODO(justin): support multiple dimensions. - numDimensions: 1, - elementType: encType, - length: length, - nullBitmap: nullBitmap, - }, b, nil -} - -// DatumTypeToArrayElementEncodingType decides an encoding type to -// place in the array header given a datum type. The element encoding -// type is then used to encode/decode array elements. 
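Aside (illustration, not part of the diff): the header byte layout and NULL bitmap above are compact but easy to misread, so here is a standalone restatement of the same bit arithmetic (mirroring setBit, numBytesInBitArray, isNull, and the hasNullFlag header byte). The bitmap bytes it prints line up with the array-encoding test vectors further down in this patch:

```
package main

import "fmt"

const hasNullFlag = 1 << 4

// Helpers mirroring the array value-encoding bit arithmetic above.
func numBytesInBitArray(numBits int) int { return (numBits + 7) / 8 }

func setBit(bitmap []byte, idx int) { bitmap[idx/8] |= 1 << uint(idx%8) }

func isNull(bitmap []byte, i uint64) bool { return (bitmap[i/8]>>(i%8))&1 == 1 }

func main() {
	// Header byte for a 1-dimensional array that contains NULLs:
	// low 4 bits = number of dimensions, bit 4 = "has nulls" flag.
	headerByte := 1 | hasNullFlag
	fmt.Printf("header byte: %#x\n", headerByte) // 0x11

	// NULL bitmap for a 9-element array with NULLs at indexes 1, 2 and 8.
	bitmap := make([]byte, numBytesInBitArray(9)) // 2 bytes
	for _, idx := range []int{1, 2, 8} {
		setBit(bitmap, idx)
	}
	fmt.Printf("bitmap: %#x %#x\n", bitmap[0], bitmap[1]) // 0x6 0x1
	fmt.Println(isNull(bitmap, 2), isNull(bitmap, 3))     // true false
}
```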
-func DatumTypeToArrayElementEncodingType(t *types.T) (encoding.Type, error) { - switch t.Family() { - case types.IntFamily: - return encoding.Int, nil - case types.OidFamily: - return encoding.Int, nil - case types.FloatFamily: - return encoding.Float, nil - case types.Box2DFamily: - return encoding.Box2D, nil - case types.GeometryFamily: - return encoding.Geo, nil - case types.GeographyFamily: - return encoding.Geo, nil - case types.DecimalFamily: - return encoding.Decimal, nil - case types.BytesFamily, types.StringFamily, types.CollatedStringFamily, types.EnumFamily: - return encoding.Bytes, nil - case types.TimestampFamily, types.TimestampTZFamily: - return encoding.Time, nil - // Note: types.Date was incorrectly mapped to encoding.Time when arrays were - // first introduced. If any 1.1 users used date arrays, they would have been - // persisted with incorrect elementType values. - case types.DateFamily, types.TimeFamily: - return encoding.Int, nil - case types.TimeTZFamily: - return encoding.TimeTZ, nil - case types.IntervalFamily: - return encoding.Duration, nil - case types.BoolFamily: - return encoding.True, nil - case types.BitFamily: - return encoding.BitArray, nil - case types.UuidFamily: - return encoding.UUID, nil - case types.INetFamily: - return encoding.IPAddr, nil - case types.JsonFamily: - return encoding.JSON, nil - case types.TupleFamily: - return encoding.Tuple, nil - default: - return 0, errors.AssertionFailedf("no known encoding type for %s", t) - } -} - -func checkElementType(paramType *types.T, elemType *types.T) error { - if paramType.Family() != elemType.Family() { - return errors.Errorf("type of array contents %s doesn't match column type %s", - paramType, elemType.Family()) - } - if paramType.Family() == types.CollatedStringFamily { - if paramType.Locale() != elemType.Locale() { - return errors.Errorf("locale of collated string array being inserted (%s) doesn't match locale of column type (%s)", - paramType.Locale(), elemType.Locale()) - } - } - return nil -} - -// encodeArrayElement appends the encoded form of one array element to -// the target byte buffer. 
-func encodeArrayElement(b []byte, d tree.Datum) ([]byte, error) { - switch t := tree.UnwrapDatum(nil, d).(type) { - case *tree.DInt: - return encoding.EncodeUntaggedIntValue(b, int64(*t)), nil - case *tree.DString: - bytes := []byte(*t) - b = encoding.EncodeUntaggedBytesValue(b, bytes) - return b, nil - case *tree.DBytes: - bytes := []byte(*t) - b = encoding.EncodeUntaggedBytesValue(b, bytes) - return b, nil - case *tree.DBitArray: - return encoding.EncodeUntaggedBitArrayValue(b, t.BitArray), nil - case *tree.DFloat: - return encoding.EncodeUntaggedFloatValue(b, float64(*t)), nil - case *tree.DBool: - return encoding.EncodeBoolValue(b, encoding.NoColumnID, bool(*t)), nil - case *tree.DDecimal: - return encoding.EncodeUntaggedDecimalValue(b, &t.Decimal), nil - case *tree.DDate: - return encoding.EncodeUntaggedIntValue(b, t.UnixEpochDaysWithOrig()), nil - case *tree.DBox2D: - return encoding.EncodeUntaggedBox2DValue(b, t.CartesianBoundingBox.BoundingBox) - case *tree.DGeography: - return encoding.EncodeUntaggedGeoValue(b, t.SpatialObjectRef()) - case *tree.DGeometry: - return encoding.EncodeUntaggedGeoValue(b, t.SpatialObjectRef()) - case *tree.DTime: - return encoding.EncodeUntaggedIntValue(b, int64(*t)), nil - case *tree.DTimeTZ: - return encoding.EncodeUntaggedTimeTZValue(b, t.TimeTZ), nil - case *tree.DTimestamp: - return encoding.EncodeUntaggedTimeValue(b, t.Time), nil - case *tree.DTimestampTZ: - return encoding.EncodeUntaggedTimeValue(b, t.Time), nil - case *tree.DInterval: - return encoding.EncodeUntaggedDurationValue(b, t.Duration), nil - case *tree.DUuid: - return encoding.EncodeUntaggedUUIDValue(b, t.UUID), nil - case *tree.DIPAddr: - return encoding.EncodeUntaggedIPAddrValue(b, t.IPAddr), nil - case *tree.DOid: - return encoding.EncodeUntaggedIntValue(b, int64(t.DInt)), nil - case *tree.DCollatedString: - return encoding.EncodeUntaggedBytesValue(b, []byte(t.Contents)), nil - case *tree.DOidWrapper: - return encodeArrayElement(b, t.Wrapped) - case *tree.DEnum: - return encoding.EncodeUntaggedBytesValue(b, t.PhysicalRep), nil - case *tree.DJSON: - encoded, err := json.EncodeJSON(nil, t.JSON) - if err != nil { - return nil, err - } - return encoding.EncodeUntaggedBytesValue(b, encoded), nil - case *tree.DTuple: - return encodeUntaggedTuple(t, b, encoding.NoColumnID, nil) - default: - return nil, errors.Errorf("don't know how to encode %s (%T)", d, d) - } -} diff --git a/pkg/sql/rowenc/encoded_datum.go b/pkg/sql/rowenc/encoded_datum.go index 0f3f42bcfada..158d4c705caa 100644 --- a/pkg/sql/rowenc/encoded_datum.go +++ b/pkg/sql/rowenc/encoded_datum.go @@ -18,6 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -53,13 +55,13 @@ type EncDatum struct { Datum tree.Datum } -func (ed *EncDatum) stringWithAlloc(typ *types.T, a *DatumAlloc) string { +func (ed *EncDatum) stringWithAlloc(typ *types.T, a *tree.DatumAlloc) string { if ed.Datum == nil { if ed.encoded == nil { return "" } if a == nil { - a = &DatumAlloc{} + a = &tree.DatumAlloc{} } err := ed.EnsureDecoded(typ, a) if err != nil { @@ -235,7 +237,7 @@ func (ed *EncDatum) IsNull() bool { } // EnsureDecoded ensures that the Datum field is set (decoding if it is not). 
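Aside (illustration, not part of the diff): the hunks that follow are mechanical, replacing every `*DatumAlloc` parameter with `*tree.DatumAlloc` now that the allocator lives in sem/tree. A hedged caller-side sketch of what this looks like after the change; the int datum and key encoding are arbitrary examples:

```
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
)

func main() {
	// The allocator now comes from sem/tree rather than rowenc.
	var da tree.DatumAlloc

	ed := rowenc.EncDatum{Datum: tree.NewDInt(42)}
	key, err := ed.Encode(types.Int, &da, descpb.DatumEncoding_ASCENDING_KEY, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ascending key bytes: %x\n", key)
}
```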
-func (ed *EncDatum) EnsureDecoded(typ *types.T, a *DatumAlloc) error { +func (ed *EncDatum) EnsureDecoded(typ *types.T, a *tree.DatumAlloc) error { if ed.Datum != nil { return nil } @@ -246,11 +248,11 @@ func (ed *EncDatum) EnsureDecoded(typ *types.T, a *DatumAlloc) error { var rem []byte switch ed.encoding { case descpb.DatumEncoding_ASCENDING_KEY: - ed.Datum, rem, err = DecodeTableKey(a, typ, ed.encoded, encoding.Ascending) + ed.Datum, rem, err = keyside.Decode(a, typ, ed.encoded, encoding.Ascending) case descpb.DatumEncoding_DESCENDING_KEY: - ed.Datum, rem, err = DecodeTableKey(a, typ, ed.encoded, encoding.Descending) + ed.Datum, rem, err = keyside.Decode(a, typ, ed.encoded, encoding.Descending) case descpb.DatumEncoding_VALUE: - ed.Datum, rem, err = DecodeTableValue(a, typ, ed.encoded) + ed.Datum, rem, err = valueside.Decode(a, typ, ed.encoded) default: return errors.AssertionFailedf("unknown encoding %d", log.Safe(ed.encoding)) } @@ -279,7 +281,7 @@ func (ed *EncDatum) Encoding() (descpb.DatumEncoding, bool) { // Note: descpb.DatumEncoding_VALUE encodings are not unique because they can contain // a column ID so they should not be used to test for equality. func (ed *EncDatum) Encode( - typ *types.T, a *DatumAlloc, enc descpb.DatumEncoding, appendTo []byte, + typ *types.T, a *tree.DatumAlloc, enc descpb.DatumEncoding, appendTo []byte, ) ([]byte, error) { if ed.encoded != nil && enc == ed.encoding { // We already have an encoding that matches that we can use. @@ -290,11 +292,11 @@ func (ed *EncDatum) Encode( } switch enc { case descpb.DatumEncoding_ASCENDING_KEY: - return EncodeTableKey(appendTo, ed.Datum, encoding.Ascending) + return keyside.Encode(appendTo, ed.Datum, encoding.Ascending) case descpb.DatumEncoding_DESCENDING_KEY: - return EncodeTableKey(appendTo, ed.Datum, encoding.Descending) + return keyside.Encode(appendTo, ed.Datum, encoding.Descending) case descpb.DatumEncoding_VALUE: - return EncodeTableValue(appendTo, descpb.ColumnID(encoding.NoColumnID), ed.Datum, a.scratch) + return valueside.Encode(appendTo, valueside.NoColumnID, ed.Datum, nil /* scratch */) default: panic(errors.AssertionFailedf("unknown encoding requested %s", enc)) } @@ -312,7 +314,7 @@ func (ed *EncDatum) Encode( // returned byte slice. Note that the context will only be used if acc is // non-nil. func (ed *EncDatum) Fingerprint( - ctx context.Context, typ *types.T, a *DatumAlloc, appendTo []byte, acc *mon.BoundAccount, + ctx context.Context, typ *types.T, a *tree.DatumAlloc, appendTo []byte, acc *mon.BoundAccount, ) ([]byte, error) { // Note: we don't ed.EnsureDecoded on top of this method, because the default // case uses ed.Encode, which has a fast path if the encoded bytes are already @@ -327,7 +329,7 @@ func (ed *EncDatum) Fingerprint( } // We must use value encodings without a column ID even if the EncDatum already // is encoded with the value encoding so that the hashes are indeed unique. - fingerprint, err = EncodeTableValue(appendTo, descpb.ColumnID(encoding.NoColumnID), ed.Datum, a.scratch) + fingerprint, err = valueside.Encode(appendTo, valueside.NoColumnID, ed.Datum, nil /* scratch */) default: // For values that are key encodable, using the ascending key. // Note that using a value encoding will not easily work in case when @@ -350,7 +352,7 @@ func (ed *EncDatum) Fingerprint( // 0 if the receiver is equal to rhs, // +1 if the receiver is greater than rhs. 
func (ed *EncDatum) Compare( - typ *types.T, a *DatumAlloc, evalCtx *tree.EvalContext, rhs *EncDatum, + typ *types.T, a *tree.DatumAlloc, evalCtx *tree.EvalContext, rhs *EncDatum, ) (int, error) { // TODO(radu): if we have both the Datum and a key encoding available, which // one would be faster to use? @@ -418,7 +420,7 @@ func (ed *EncDatum) GetInt() (int64, error) { // EncDatumRow is a row of EncDatums. type EncDatumRow []EncDatum -func (r EncDatumRow) stringToBuf(types []*types.T, a *DatumAlloc, b *bytes.Buffer) { +func (r EncDatumRow) stringToBuf(types []*types.T, a *tree.DatumAlloc, b *bytes.Buffer) { if len(types) != len(r) { panic(errors.AssertionFailedf("mismatched types (%v) and row (%v)", types, r)) } @@ -445,7 +447,7 @@ func (r EncDatumRow) Copy() EncDatumRow { func (r EncDatumRow) String(types []*types.T) string { var b bytes.Buffer - r.stringToBuf(types, &DatumAlloc{}, &b) + r.stringToBuf(types, &tree.DatumAlloc{}, &b) return b.String() } @@ -464,7 +466,7 @@ func (r EncDatumRow) Size() uintptr { // EncDatumRowToDatums converts a given EncDatumRow to a Datums. func EncDatumRowToDatums( - types []*types.T, datums tree.Datums, row EncDatumRow, da *DatumAlloc, + types []*types.T, datums tree.Datums, row EncDatumRow, da *tree.DatumAlloc, ) error { if len(types) != len(row) { return errors.AssertionFailedf( @@ -501,7 +503,7 @@ func EncDatumRowToDatums( // column). func (r EncDatumRow) Compare( types []*types.T, - a *DatumAlloc, + a *tree.DatumAlloc, ordering colinfo.ColumnOrdering, evalCtx *tree.EvalContext, rhs EncDatumRow, @@ -527,7 +529,7 @@ func (r EncDatumRow) Compare( // CompareToDatums is a version of Compare which compares against decoded Datums. func (r EncDatumRow) CompareToDatums( types []*types.T, - a *DatumAlloc, + a *tree.DatumAlloc, ordering colinfo.ColumnOrdering, evalCtx *tree.EvalContext, rhs tree.Datums, @@ -551,7 +553,7 @@ func (r EncDatumRow) CompareToDatums( type EncDatumRows []EncDatumRow func (r EncDatumRows) String(types []*types.T) string { - var a DatumAlloc + var a tree.DatumAlloc var b bytes.Buffer b.WriteString("[") for i, r := range r { diff --git a/pkg/sql/rowenc/encoded_datum_test.go b/pkg/sql/rowenc/encoded_datum_test.go index 26e3b57da136..b8d2a3e4d837 100644 --- a/pkg/sql/rowenc/encoded_datum_test.go +++ b/pkg/sql/rowenc/encoded_datum_test.go @@ -16,12 +16,13 @@ import ( "time" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -32,7 +33,7 @@ import ( func TestEncDatum(t *testing.T) { defer leaktest.AfterTest(t)() - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) v := rowenc.EncDatum{} @@ -126,7 +127,7 @@ func TestEncDatumNull(t *testing.T) { t.Error("DNull not null") } - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc rng, _ := randutil.NewTestRand() // Generate random EncDatums (some of which are null), and verify that a datum @@ -156,7 +157,7 @@ func TestEncDatumNull(t *testing.T) { // those encodings. 
It also checks if the Compare resulted in decoding or not. func checkEncDatumCmp( t *testing.T, - a *rowenc.DatumAlloc, + a *tree.DatumAlloc, typ *types.T, v1, v2 *rowenc.EncDatum, enc1, enc2 descpb.DatumEncoding, @@ -205,7 +206,7 @@ func checkEncDatumCmp( func TestEncDatumCompare(t *testing.T) { defer leaktest.AfterTest(t)() - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) rng, _ := randutil.NewTestRand() @@ -264,7 +265,7 @@ func TestEncDatumCompare(t *testing.T) { func TestEncDatumFromBuffer(t *testing.T) { defer leaktest.AfterTest(t)() - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) rng, _ := randutil.NewTestRand() @@ -445,7 +446,7 @@ func TestEncDatumRowCompare(t *testing.T) { }, } - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} evalCtx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) defer evalCtx.Stop(context.Background()) for _, c := range testCases { @@ -529,7 +530,7 @@ func TestValueEncodeDecodeTuple(t *testing.T) { switch typedTest := test.(type) { case *tree.DTuple: - buf, err := rowenc.EncodeTableValue(nil, descpb.ColumnID(encoding.NoColumnID), typedTest, nil) + buf, err := valueside.Encode(nil, valueside.NoColumnID, typedTest, nil) if err != nil { t.Fatalf("seed %d: encoding tuple %v with types %v failed with error: %v", seed, test, colTypes[i], err) @@ -537,7 +538,7 @@ func TestValueEncodeDecodeTuple(t *testing.T) { var decodedTuple tree.Datum testTyp := test.ResolvedType() - decodedTuple, buf, err = rowenc.DecodeTableValue(&rowenc.DatumAlloc{}, testTyp, buf) + decodedTuple, buf, err = valueside.Decode(&tree.DatumAlloc{}, testTyp, buf) if err != nil { t.Fatalf("seed %d: decoding tuple %v with type (%+v, %+v) failed with error: %v", seed, test, colTypes[i], testTyp, err) @@ -735,7 +736,7 @@ func TestEncDatumFingerprintMemory(t *testing.T) { defer evalCtx.Stop(ctx) memAcc := evalCtx.Mon.MakeBoundAccount() defer memAcc.Close(ctx) - var da rowenc.DatumAlloc + var da tree.DatumAlloc for _, c := range testCases { memAcc.Clear(ctx) _, err := c.encDatum.Fingerprint(ctx, c.typ, &da, nil /* appendTo */, &memAcc) diff --git a/pkg/sql/rowenc/index_encoding.go b/pkg/sql/rowenc/index_encoding.go index a62e7480b6df..8ee27037ab4a 100644 --- a/pkg/sql/rowenc/index_encoding.go +++ b/pkg/sql/rowenc/index_encoding.go @@ -12,7 +12,6 @@ package rowenc import ( "context" - "fmt" "sort" "unsafe" @@ -23,7 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/inverted" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/rowencpb" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -42,10 +43,8 @@ import ( // MakeIndexKeyPrefix returns the key prefix used for the index's data. If you // need the corresponding Span, prefer desc.IndexSpan(indexID) or // desc.PrimaryIndexSpan(). 
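Aside (illustration, not part of the diff): the next hunk changes MakeIndexKeyPrefix to take the table ID rather than the whole descriptor, which is all the function ever needed. A hedged usage sketch under the new signature; the table and index IDs here are made-up values:

```
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
)

func main() {
	// Before: MakeIndexKeyPrefix(codec, tableDesc, indexID).
	// After: callers pass tableDesc.GetID(); 52 and 1 are illustrative IDs.
	prefix := rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, descpb.ID(52), descpb.IndexID(1))
	fmt.Printf("index key prefix: %x\n", prefix)
}
```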
-func MakeIndexKeyPrefix( - codec keys.SQLCodec, desc catalog.TableDescriptor, indexID descpb.IndexID, -) []byte { - return codec.IndexPrefix(uint32(desc.GetID()), uint32(indexID)) +func MakeIndexKeyPrefix(codec keys.SQLCodec, tableID descpb.ID, indexID descpb.IndexID) []byte { + return codec.IndexPrefix(uint32(tableID), uint32(indexID)) } // EncodeIndexKey creates a key by concatenating keyPrefix with the @@ -63,7 +62,6 @@ func EncodeIndexKey( ) (key []byte, containsNull bool, err error) { var colIDWithNullVal descpb.ColumnID key, colIDWithNullVal, err = EncodePartialIndexKey( - tableDesc, index, index.NumKeyColumns(), /* encode all columns */ colMap, @@ -88,7 +86,6 @@ func EncodeIndexKey( // given table, index, and values, with the same method as // EncodePartialIndexKey. func EncodePartialIndexSpan( - tableDesc catalog.TableDescriptor, index catalog.Index, numCols int, colMap catalog.TableColMap, @@ -97,7 +94,7 @@ func EncodePartialIndexSpan( ) (span roachpb.Span, containsNull bool, err error) { var key roachpb.Key var colIDWithNullVal descpb.ColumnID - key, colIDWithNullVal, err = EncodePartialIndexKey(tableDesc, index, numCols, colMap, values, keyPrefix) + key, colIDWithNullVal, err = EncodePartialIndexKey(index, numCols, colMap, values, keyPrefix) containsNull = colIDWithNullVal != 0 if err != nil { return span, containsNull, err @@ -110,7 +107,6 @@ func EncodePartialIndexSpan( // - index.KeyColumnIDs for unique indexes, and // - append(index.KeyColumnIDs, index.KeySuffixColumnIDs) for non-unique indexes. func EncodePartialIndexKey( - tableDesc catalog.TableDescriptor, index catalog.Index, numCols int, colMap catalog.TableColMap, @@ -176,7 +172,7 @@ func MakeSpanFromEncDatums( types []*types.T, dirs []descpb.IndexDescriptor_Direction, index catalog.Index, - alloc *DatumAlloc, + alloc *tree.DatumAlloc, keyPrefix []byte, ) (_ roachpb.Span, containsNull bool, _ error) { startKey, _, containsNull, err := MakeKeyFromEncDatums(values, types, dirs, index, alloc, keyPrefix) @@ -366,7 +362,7 @@ func MakeKeyFromEncDatums( types []*types.T, dirs []descpb.IndexDescriptor_Direction, index catalog.Index, - alloc *DatumAlloc, + alloc *tree.DatumAlloc, keyPrefix []byte, ) (_ roachpb.Key, complete bool, containsNull bool, _ error) { // Values may be a prefix of the index columns. @@ -413,7 +409,7 @@ func appendEncDatumsToKey( types []*types.T, values EncDatumRow, dirs []descpb.IndexDescriptor_Direction, - alloc *DatumAlloc, + alloc *tree.DatumAlloc, ) (_ roachpb.Key, containsNull bool, _ error) { for i, val := range values { encoding := descpb.DatumEncoding_ASCENDING_KEY @@ -444,7 +440,7 @@ func DecodePartialTableIDIndexID(key []byte) ([]byte, descpb.ID, descpb.IndexID, // // Don't use this function in the scan "hot path". 
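Aside (illustration, not part of the diff): a later hunk in this file (writeColumnValues) replaces the hand-rolled `col.id - lastColID` arithmetic with `valueside.MakeColumnIDDelta`. The underlying scheme is unchanged: when several column values share one KV value, each tag stores the difference from the previous column ID rather than the absolute ID. A toy sketch of that delta scheme, independent of any CockroachDB types:

```
package main

import "fmt"

func main() {
	// Column IDs present in one KV value, in ascending order.
	colIDs := []uint32{1, 3, 4}

	// Writer side: store the delta from the previously written column ID.
	var lastColID uint32
	var deltas []uint32
	for _, id := range colIDs {
		deltas = append(deltas, id-lastColID)
		lastColID = id
	}
	fmt.Println(deltas) // [1 2 1]

	// Reader side: a running sum of the deltas recovers the absolute IDs.
	var decoded []uint32
	lastColID = 0
	for _, d := range deltas {
		lastColID += d
		decoded = append(decoded, lastColID)
	}
	fmt.Println(decoded) // [1 3 4]
}
```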
func DecodeIndexKeyPrefix( - codec keys.SQLCodec, desc catalog.TableDescriptor, key []byte, + codec keys.SQLCodec, expectedTableID descpb.ID, key []byte, ) (indexID descpb.IndexID, remaining []byte, err error) { key, err = codec.StripTenantPrefix(key) if err != nil { @@ -455,9 +451,9 @@ func DecodeIndexKeyPrefix( if err != nil { return 0, nil, err } - if tableID != desc.GetID() { + if tableID != expectedTableID { return 0, nil, errors.Errorf( - "unexpected table ID %d, expected %d instead", tableID, desc.GetID()) + "unexpected table ID %d, expected %d instead", tableID, expectedTableID) } return indexID, key, err } @@ -716,7 +712,7 @@ func encodeArrayInvertedIndexTableKeys( } outKey := make([]byte, len(inKey)) copy(outKey, inKey) - newKey, err := EncodeTableKey(outKey, d, encoding.Ascending) + newKey, err := keyside.Encode(outKey, d, encoding.Ascending) if err != nil { return nil, err } @@ -881,7 +877,7 @@ func EncodePrimaryIndex( values []tree.Datum, includeEmpty bool, ) ([]IndexEntry, error) { - keyPrefix := MakeIndexKeyPrefix(codec, tableDesc, index.GetID()) + keyPrefix := MakeIndexKeyPrefix(codec, tableDesc.GetID(), index.GetID()) indexKey, _, err := EncodeIndexKey(tableDesc, index, colMap, values, keyPrefix) if err != nil { return nil, err @@ -911,7 +907,7 @@ func EncodePrimaryIndex( if err != nil { return err } - value, err := MarshalColumnValue(col, datum) + value, err := valueside.MarshalLegacy(col.GetType(), datum) if err != nil { return err } @@ -971,7 +967,7 @@ func EncodeSecondaryIndex( values []tree.Datum, includeEmpty bool, ) ([]IndexEntry, error) { - secondaryIndexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, secondaryIndex.GetID()) + secondaryIndexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc.GetID(), secondaryIndex.GetID()) // Use the primary key encoding for covering indexes. if secondaryIndex.GetEncodingType() == descpb.PrimaryIndexEncoding { @@ -1222,13 +1218,10 @@ func writeColumnValues( if val == tree.DNull || (col.isComposite && !val.(tree.CompositeDatum).IsComposite()) { continue } - if lastColID > col.id { - panic(fmt.Errorf("cannot write column id %d after %d", col.id, lastColID)) - } - colIDDiff := col.id - lastColID + colIDDelta := valueside.MakeColumnIDDelta(lastColID, col.id) lastColID = col.id var err error - value, err = EncodeTableValue(value, colIDDiff, val, nil) + value, err = valueside.Encode(value, colIDDelta, val, nil) if err != nil { return nil, err } @@ -1326,7 +1319,7 @@ func EncodeColumns( return nil, colIDWithNullVal, err } - if key, err = EncodeTableKey(key, val, dir); err != nil { + if key, err = keyside.Encode(key, val, dir); err != nil { return nil, colIDWithNullVal, err } } diff --git a/pkg/sql/rowenc/index_encoding_test.go b/pkg/sql/rowenc/index_encoding_test.go index 06630afeab52..e28efa531656 100644 --- a/pkg/sql/rowenc/index_encoding_test.go +++ b/pkg/sql/rowenc/index_encoding_test.go @@ -14,10 +14,7 @@ import ( "bytes" "context" "fmt" - "math" - "reflect" "testing" - "time" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" @@ -32,13 +29,8 @@ import ( . 
"github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/json" - "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/randutil" - "github.com/cockroachdb/cockroach/pkg/util/timeofday" - "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -95,7 +87,7 @@ func makeTableDescForTest(test indexKeyTest) (catalog.TableDescriptor, catalog.T func decodeIndex( codec keys.SQLCodec, tableDesc catalog.TableDescriptor, index catalog.Index, key []byte, ) ([]tree.Datum, error) { - types, err := colinfo.GetColumnTypes(tableDesc, index.IndexDesc().KeyColumnIDs, nil) + types, err := getColumnTypes(tableDesc.IndexKeyColumns(index)) if err != nil { return nil, err } @@ -110,7 +102,7 @@ func decodeIndex( } decodedValues := make([]tree.Datum, len(values)) - var da DatumAlloc + var da tree.DatumAlloc for i, value := range values { err := value.EnsureDecoded(types[i], &da) if err != nil { @@ -124,7 +116,7 @@ func decodeIndex( func TestIndexKey(t *testing.T) { rng, _ := randutil.NewTestRand() - var a DatumAlloc + var a tree.DatumAlloc tests := []indexKeyTest{ { @@ -206,7 +198,7 @@ func TestIndexKey(t *testing.T) { testValues := append(test.primaryValues, test.secondaryValues...) codec := keys.SystemSQLCodec - primaryKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, tableDesc.GetPrimaryIndexID()) + primaryKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc.GetID(), tableDesc.GetPrimaryIndexID()) primaryKey, _, err := EncodeIndexKey(tableDesc, tableDesc.GetPrimaryIndex(), colMap, testValues, primaryKeyPrefix) if err != nil { t.Fatal(err) @@ -240,7 +232,7 @@ func TestIndexKey(t *testing.T) { } } - indexID, _, err := DecodeIndexKeyPrefix(codec, tableDesc, entry.Key) + indexID, _, err := DecodeIndexKeyPrefix(codec, tableDesc.GetID(), entry.Key) if err != nil { t.Fatal(err) } @@ -650,327 +642,14 @@ func TestEncodeContainedArrayInvertedIndexSpans(t *testing.T) { } } -type arrayEncodingTest struct { - name string - datum tree.DArray - encoding []byte -} - -func TestArrayEncoding(t *testing.T) { - tests := []arrayEncodingTest{ - { - "empty int array", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{}, - }, - []byte{1, 3, 0}, - }, { - "single int array", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{tree.NewDInt(1)}, - }, - []byte{1, 3, 1, 2}, - }, { - "multiple int array", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{tree.NewDInt(1), tree.NewDInt(2), tree.NewDInt(3)}, - }, - []byte{1, 3, 3, 2, 4, 6}, - }, { - "string array", - tree.DArray{ - ParamTyp: types.String, - Array: tree.Datums{tree.NewDString("foo"), tree.NewDString("bar"), tree.NewDString("baz")}, - }, - []byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122}, - }, { - "name array", - tree.DArray{ - ParamTyp: types.Name, - Array: tree.Datums{tree.NewDName("foo"), tree.NewDName("bar"), tree.NewDName("baz")}, - }, - []byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122}, - }, - { - "bool array", - tree.DArray{ - ParamTyp: types.Bool, - Array: tree.Datums{tree.MakeDBool(true), tree.MakeDBool(false)}, - }, - []byte{1, 10, 2, 10, 11}, - }, { - "array containing a single null", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{tree.DNull}, - 
HasNulls: true, - }, - []byte{17, 3, 1, 1}, - }, { - "array containing multiple nulls", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{tree.NewDInt(1), tree.DNull, tree.DNull}, - HasNulls: true, - }, - []byte{17, 3, 3, 6, 2}, - }, { - "array whose NULL bitmap spans exactly one byte", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{ - tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3), - tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6), - }, - HasNulls: true, - }, - []byte{17, 3, 8, 6, 2, 4, 6, 8, 10, 12}, - }, { - "array whose NULL bitmap spans more than one byte", - tree.DArray{ - ParamTyp: types.Int, - Array: tree.Datums{ - tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3), - tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6), tree.DNull, - }, - HasNulls: true, - }, - []byte{17, 3, 9, 6, 1, 2, 4, 6, 8, 10, 12}, - }, - } - - for _, test := range tests { - t.Run("encode "+test.name, func(t *testing.T) { - enc, err := EncodeArray(&test.datum, nil) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(enc, test.encoding) { - t.Fatalf("expected %s to encode to %v, got %v", test.datum.String(), test.encoding, enc) - } - }) - - t.Run("decode "+test.name, func(t *testing.T) { - enc := make([]byte, 0) - enc = append(enc, byte(len(test.encoding))) - enc = append(enc, test.encoding...) - d, _, err := DecodeArray(&DatumAlloc{}, test.datum.ParamTyp, enc) - hasNulls := d.(*tree.DArray).HasNulls - if test.datum.HasNulls != hasNulls { - t.Fatalf("expected %v to have HasNulls=%t, got %t", enc, test.datum.HasNulls, hasNulls) - } - if err != nil { - t.Fatal(err) - } - evalContext := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) - if d.Compare(evalContext, &test.datum) != 0 { - t.Fatalf("expected %v to decode to %s, got %s", enc, test.datum.String(), d.String()) - } - }) - } -} - -func BenchmarkArrayEncoding(b *testing.B) { - ary := tree.DArray{ParamTyp: types.Int, Array: tree.Datums{}} - for i := 0; i < 10000; i++ { - _ = ary.Append(tree.NewDInt(1)) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = EncodeArray(&ary, nil) - } -} - -func TestMarshalColumnValue(t *testing.T) { - defer leaktest.AfterTest(t)() - - tests := []struct { - typ *types.T - datum tree.Datum - exp roachpb.Value - }{ - { - typ: types.Bool, - datum: tree.MakeDBool(true), - exp: func() (v roachpb.Value) { v.SetBool(true); return }(), - }, - { - typ: types.Bool, - datum: tree.MakeDBool(false), - exp: func() (v roachpb.Value) { v.SetBool(false); return }(), - }, - { - typ: types.Int, - datum: tree.NewDInt(314159), - exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), - }, - { - typ: types.Float, - datum: tree.NewDFloat(3.14159), - exp: func() (v roachpb.Value) { v.SetFloat(3.14159); return }(), - }, - { - typ: types.Decimal, - datum: func() (v tree.Datum) { - v, err := tree.ParseDDecimal("1234567890.123456890") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - return - }(), - exp: func() (v roachpb.Value) { - dDecimal, err := tree.ParseDDecimal("1234567890.123456890") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - err = v.SetDecimal(&dDecimal.Decimal) - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - return - }(), - }, - { - typ: types.Date, - datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(314159)), - exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), - }, - { - typ: types.Date, - 
datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MinInt64)), - exp: func() (v roachpb.Value) { v.SetInt(math.MinInt64); return }(), - }, - { - typ: types.Date, - datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MaxInt64)), - exp: func() (v roachpb.Value) { v.SetInt(math.MaxInt64); return }(), - }, - { - typ: types.Time, - datum: tree.MakeDTime(timeofday.FromInt(314159)), - exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), - }, - { - typ: types.Timestamp, - datum: tree.MustMakeDTimestamp(timeutil.Unix(314159, 1000), time.Microsecond), - exp: func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(), - }, - { - typ: types.TimestampTZ, - datum: tree.MustMakeDTimestampTZ(timeutil.Unix(314159, 1000), time.Microsecond), - exp: func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(), - }, - { - typ: types.String, - datum: tree.NewDString("testing123"), - exp: func() (v roachpb.Value) { v.SetString("testing123"); return }(), - }, - { - typ: types.Name, - datum: tree.NewDName("testingname123"), - exp: func() (v roachpb.Value) { v.SetString("testingname123"); return }(), - }, - { - typ: types.Bytes, - datum: tree.NewDBytes(tree.DBytes([]byte{0x31, 0x41, 0x59})), - exp: func() (v roachpb.Value) { v.SetBytes([]byte{0x31, 0x41, 0x59}); return }(), - }, - { - typ: types.Uuid, - datum: func() (v tree.Datum) { - v, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - return - }(), - exp: func() (v roachpb.Value) { - dUUID, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - v.SetBytes(dUUID.GetBytes()) - return - }(), - }, - { - typ: types.INet, - datum: func() (v tree.Datum) { - v, err := tree.ParseDIPAddrFromINetString("192.168.0.1") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - return - }(), - exp: func() (v roachpb.Value) { - ipAddr, err := tree.ParseDIPAddrFromINetString("192.168.0.1") - if err != nil { - t.Fatalf("Unexpected error while creating expected value: %s", err) - } - data := ipAddr.ToBuffer(nil) - v.SetBytes(data) - return - }(), - }, - } - - for i, testCase := range tests { - typ := testCase.typ - if actual, err := MarshalColumnTypeValue("testcol", typ, testCase.datum); err != nil { - t.Errorf("%d: unexpected error with column type %v: %v", i, typ, err) - } else if !reflect.DeepEqual(actual, testCase.exp) { - t.Errorf("%d: MarshalColumnValue() got %v, expected %v", i, actual, testCase.exp) - } - } -} - -func TestDecodeTableValue(t *testing.T) { - a := &DatumAlloc{} - for _, tc := range []struct { - in tree.Datum - typ *types.T - err string - }{ - // These test cases are not intended to be exhaustive, but rather exercise - // the special casing and error handling of DecodeTableValue. 
- {tree.DNull, types.Bool, ""}, - {tree.DBoolTrue, types.Bool, ""}, - {tree.NewDInt(tree.DInt(4)), types.Bool, "value type is not True or False: Int"}, - {tree.DNull, types.Int, ""}, - {tree.NewDInt(tree.DInt(4)), types.Int, ""}, - {tree.DBoolTrue, types.Int, "decoding failed"}, - } { - t.Run("", func(t *testing.T) { - var prefix, scratch []byte - buf, err := EncodeTableValue(prefix, 0 /* colID */, tc.in, scratch) - if err != nil { - t.Fatal(err) - } - d, _, err := DecodeTableValue(a, tc.typ, buf) - if !testutils.IsError(err, tc.err) { - t.Fatalf("expected error %q, but got %v", tc.err, err) - } else if err != nil { - return - } - if tc.in.Compare(tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()), d) != 0 { - t.Fatalf("decoded datum %[1]v (%[1]T) does not match encoded datum %[2]v (%[2]T)", d, tc.in) - } - }) - } -} - // ExtractIndexKey constructs the index (primary) key for a row from any index // key/value entry, including secondary indexes. // // Don't use this function in the scan "hot path". func ExtractIndexKey( - a *DatumAlloc, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, entry kv.KeyValue, + a *tree.DatumAlloc, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, entry kv.KeyValue, ) (roachpb.Key, error) { - indexID, key, err := DecodeIndexKeyPrefix(codec, tableDesc, entry.Key) + indexID, key, err := DecodeIndexKeyPrefix(codec, tableDesc.GetID(), entry.Key) if err != nil { return nil, err } @@ -984,7 +663,7 @@ func ExtractIndexKey( } // Extract the values for index.KeyColumnIDs. - indexTypes, err := colinfo.GetColumnTypes(tableDesc, index.IndexDesc().KeyColumnIDs, nil) + indexTypes, err := getColumnTypes(tableDesc.IndexKeyColumns(index)) if err != nil { return nil, err } @@ -996,7 +675,7 @@ func ExtractIndexKey( } // Extract the values for index.KeySuffixColumnIDs - extraTypes, err := colinfo.GetColumnTypes(tableDesc, index.IndexDesc().KeySuffixColumnIDs, nil) + extraTypes, err := getColumnTypes(tableDesc.IndexKeySuffixColumns(index)) if err != nil { return nil, err } @@ -1028,7 +707,7 @@ func ExtractIndexKey( columnID := index.GetKeySuffixColumnID(i) colMap.Set(columnID, i+index.NumKeyColumns()) } - indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, tableDesc.GetPrimaryIndexID()) + indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc.GetID(), tableDesc.GetPrimaryIndexID()) decodedValues := make([]tree.Datum, len(values)+len(extraValues)) for i, value := range values { @@ -1049,3 +728,14 @@ func ExtractIndexKey( tableDesc, tableDesc.GetPrimaryIndex(), colMap, decodedValues, indexKeyPrefix) return indexKey, err } + +func getColumnTypes(columns []catalog.Column) ([]*types.T, error) { + outTypes := make([]*types.T, len(columns)) + for i, col := range columns { + if !col.Public() { + return nil, fmt.Errorf("column-id \"%d\" does not exist", col.GetID()) + } + outTypes[i] = col.GetType() + } + return outTypes, nil +} diff --git a/pkg/sql/rowenc/keyside/BUILD.bazel b/pkg/sql/rowenc/keyside/BUILD.bazel new file mode 100644 index 000000000000..a8232dc170ab --- /dev/null +++ b/pkg/sql/rowenc/keyside/BUILD.bazel @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "keyside", + srcs = [ + "array.go", + "decode.go", + "doc.go", + "encode.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside", + visibility = ["//visibility:public"], + deps = [ + "//pkg/geo", + "//pkg/geo/geopb", + "//pkg/sql/sem/tree", + "//pkg/sql/types", + "//pkg/util/bitarray", + "//pkg/util/duration", + 
"//pkg/util/encoding", + "//pkg/util/errorutil/unimplemented", + "//pkg/util/ipaddr", + "//pkg/util/timetz", + "//pkg/util/timeutil/pgdate", + "//pkg/util/uuid", + "@com_github_cockroachdb_apd_v3//:apd", + "@com_github_cockroachdb_errors//:errors", + "@com_github_lib_pq//oid", + ], +) + +go_test( + name = "keyside_test", + srcs = ["keyside_test.go"], + deps = [ + ":keyside", + "//pkg/settings/cluster", + "//pkg/sql/randgen", + "//pkg/sql/sem/tree", + "//pkg/sql/types", + "//pkg/util/encoding", + "//pkg/util/timeutil", + "@com_github_leanovate_gopter//:gopter", + "@com_github_leanovate_gopter//prop", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/sql/rowenc/keyside/array.go b/pkg/sql/rowenc/keyside/array.go new file mode 100644 index 000000000000..f68b88b79c5f --- /dev/null +++ b/pkg/sql/rowenc/keyside/array.go @@ -0,0 +1,80 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyside + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/errors" +) + +// encodeArrayKey generates an ordered key encoding of an array. +// The encoding format for an array [a, b] is as follows: +// [arrayMarker, enc(a), enc(b), terminator]. +// The terminator is guaranteed to be less than all encoded values, +// so two arrays with the same prefix but different lengths will sort +// correctly. The key difference is that NULL values need to be encoded +// differently, because the standard NULL encoding conflicts with the +// terminator byte. This NULL value is chosen to be larger than the +// terminator but less than all existing encoded values. +func encodeArrayKey(b []byte, array *tree.DArray, dir encoding.Direction) ([]byte, error) { + var err error + b = encoding.EncodeArrayKeyMarker(b, dir) + for _, elem := range array.Array { + if elem == tree.DNull { + b = encoding.EncodeNullWithinArrayKey(b, dir) + } else { + b, err = Encode(b, elem, dir) + if err != nil { + return nil, err + } + } + } + return encoding.EncodeArrayKeyTerminator(b, dir), nil +} + +// decodeArrayKey decodes an array key generated by encodeArrayKey. 
+func decodeArrayKey( + a *tree.DatumAlloc, t *types.T, buf []byte, dir encoding.Direction, +) (tree.Datum, []byte, error) { + var err error + buf, err = encoding.ValidateAndConsumeArrayKeyMarker(buf, dir) + if err != nil { + return nil, nil, err + } + + result := tree.NewDArray(t.ArrayContents()) + + for { + if len(buf) == 0 { + return nil, nil, errors.AssertionFailedf("invalid array encoding (unterminated)") + } + if encoding.IsArrayKeyDone(buf, dir) { + buf = buf[1:] + break + } + var d tree.Datum + if encoding.IsNextByteArrayEncodedNull(buf, dir) { + d = tree.DNull + buf = buf[1:] + } else { + d, buf, err = Decode(a, t.ArrayContents(), buf, dir) + if err != nil { + return nil, nil, err + } + } + if err := result.Append(d); err != nil { + return nil, nil, err + } + } + return result, buf, nil +} diff --git a/pkg/sql/rowenc/keyside/decode.go b/pkg/sql/rowenc/keyside/decode.go new file mode 100644 index 000000000000..9057ddaff062 --- /dev/null +++ b/pkg/sql/rowenc/keyside/decode.go @@ -0,0 +1,284 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyside + +import ( + "time" + + "github.com/cockroachdb/apd/v3" + "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/cockroachdb/cockroach/pkg/geo/geopb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/bitarray" + "github.com/cockroachdb/cockroach/pkg/util/duration" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/ipaddr" + "github.com/cockroachdb/cockroach/pkg/util/timetz" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/errors" + "github.com/lib/pq/oid" +) + +// Decode decodes a value encoded by Encode from a key. +func Decode( + a *tree.DatumAlloc, valType *types.T, key []byte, dir encoding.Direction, +) (_ tree.Datum, remainingKey []byte, _ error) { + if (dir != encoding.Ascending) && (dir != encoding.Descending) { + return nil, nil, errors.Errorf("invalid direction: %d", dir) + } + var isNull bool + if key, isNull = encoding.DecodeIfNull(key); isNull { + return tree.DNull, key, nil + } + var rkey []byte + var err error + + switch valType.Family() { + case types.ArrayFamily: + return decodeArrayKey(a, valType, key, dir) + case types.BitFamily: + var r bitarray.BitArray + if dir == encoding.Ascending { + rkey, r, err = encoding.DecodeBitArrayAscending(key) + } else { + rkey, r, err = encoding.DecodeBitArrayDescending(key) + } + return a.NewDBitArray(tree.DBitArray{BitArray: r}), rkey, err + case types.BoolFamily: + var i int64 + if dir == encoding.Ascending { + rkey, i, err = encoding.DecodeVarintAscending(key) + } else { + rkey, i, err = encoding.DecodeVarintDescending(key) + } + // No need to chunk allocate DBool as MakeDBool returns either + // tree.DBoolTrue or tree.DBoolFalse. 
+ return tree.MakeDBool(tree.DBool(i != 0)), rkey, err + case types.IntFamily: + var i int64 + if dir == encoding.Ascending { + rkey, i, err = encoding.DecodeVarintAscending(key) + } else { + rkey, i, err = encoding.DecodeVarintDescending(key) + } + return a.NewDInt(tree.DInt(i)), rkey, err + case types.FloatFamily: + var f float64 + if dir == encoding.Ascending { + rkey, f, err = encoding.DecodeFloatAscending(key) + } else { + rkey, f, err = encoding.DecodeFloatDescending(key) + } + return a.NewDFloat(tree.DFloat(f)), rkey, err + case types.DecimalFamily: + var d apd.Decimal + if dir == encoding.Ascending { + rkey, d, err = encoding.DecodeDecimalAscending(key, nil) + } else { + rkey, d, err = encoding.DecodeDecimalDescending(key, nil) + } + dd := a.NewDDecimal(tree.DDecimal{Decimal: d}) + return dd, rkey, err + case types.StringFamily: + var r string + if dir == encoding.Ascending { + // Perform a deep copy so that r would never reference the key's + // memory which might keep the BatchResponse alive. + rkey, r, err = encoding.DecodeUnsafeStringAscendingDeepCopy(key, nil) + } else { + rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil) + } + if valType.Oid() == oid.T_name { + return a.NewDName(tree.DString(r)), rkey, err + } + return a.NewDString(tree.DString(r)), rkey, err + case types.CollatedStringFamily: + var r string + if dir == encoding.Ascending { + // Perform a deep copy so that r would never reference the key's + // memory which might keep the BatchResponse alive. + rkey, r, err = encoding.DecodeUnsafeStringAscendingDeepCopy(key, nil) + } else { + rkey, r, err = encoding.DecodeUnsafeStringDescending(key, nil) + } + if err != nil { + return nil, nil, err + } + d, err := a.NewDCollatedString(r, valType.Locale()) + return d, rkey, err + case types.JsonFamily: + // Don't attempt to decode the JSON value. Instead, just return the + // remaining bytes of the key. + jsonLen, err := encoding.PeekLength(key) + if err != nil { + return nil, nil, err + } + return tree.DNull, key[jsonLen:], nil + case types.BytesFamily: + var r []byte + if dir == encoding.Ascending { + // No need to perform the deep copy since converting to string below + // will do that for us. 
+ rkey, r, err = encoding.DecodeBytesAscending(key, nil) + } else { + rkey, r, err = encoding.DecodeBytesDescending(key, nil) + } + return a.NewDBytes(tree.DBytes(r)), rkey, err + case types.VoidFamily: + rkey, err = encoding.DecodeVoidAscendingOrDescending(key) + return a.NewDVoid(), rkey, err + case types.Box2DFamily: + var r geopb.BoundingBox + if dir == encoding.Ascending { + rkey, r, err = encoding.DecodeBox2DAscending(key) + } else { + rkey, r, err = encoding.DecodeBox2DDescending(key) + } + return a.NewDBox2D(tree.DBox2D{ + CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: r}, + }), rkey, err + case types.GeographyFamily: + g := a.NewDGeographyEmpty() + so := g.Geography.SpatialObjectRef() + if dir == encoding.Ascending { + rkey, err = encoding.DecodeGeoAscending(key, so) + } else { + rkey, err = encoding.DecodeGeoDescending(key, so) + } + a.DoneInitNewDGeo(so) + return g, rkey, err + case types.GeometryFamily: + g := a.NewDGeometryEmpty() + so := g.Geometry.SpatialObjectRef() + if dir == encoding.Ascending { + rkey, err = encoding.DecodeGeoAscending(key, so) + } else { + rkey, err = encoding.DecodeGeoDescending(key, so) + } + a.DoneInitNewDGeo(so) + return g, rkey, err + case types.DateFamily: + var t int64 + if dir == encoding.Ascending { + rkey, t, err = encoding.DecodeVarintAscending(key) + } else { + rkey, t, err = encoding.DecodeVarintDescending(key) + } + return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(t))), rkey, err + case types.TimeFamily: + var t int64 + if dir == encoding.Ascending { + rkey, t, err = encoding.DecodeVarintAscending(key) + } else { + rkey, t, err = encoding.DecodeVarintDescending(key) + } + return a.NewDTime(tree.DTime(t)), rkey, err + case types.TimeTZFamily: + var t timetz.TimeTZ + if dir == encoding.Ascending { + rkey, t, err = encoding.DecodeTimeTZAscending(key) + } else { + rkey, t, err = encoding.DecodeTimeTZDescending(key) + } + return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: t}), rkey, err + case types.TimestampFamily: + var t time.Time + if dir == encoding.Ascending { + rkey, t, err = encoding.DecodeTimeAscending(key) + } else { + rkey, t, err = encoding.DecodeTimeDescending(key) + } + return a.NewDTimestamp(tree.DTimestamp{Time: t}), rkey, err + case types.TimestampTZFamily: + var t time.Time + if dir == encoding.Ascending { + rkey, t, err = encoding.DecodeTimeAscending(key) + } else { + rkey, t, err = encoding.DecodeTimeDescending(key) + } + return a.NewDTimestampTZ(tree.DTimestampTZ{Time: t}), rkey, err + case types.IntervalFamily: + var d duration.Duration + if dir == encoding.Ascending { + rkey, d, err = encoding.DecodeDurationAscending(key) + } else { + rkey, d, err = encoding.DecodeDurationDescending(key) + } + return a.NewDInterval(tree.DInterval{Duration: d}), rkey, err + case types.UuidFamily: + var r []byte + if dir == encoding.Ascending { + // No need to perform the deep copy since converting to UUID below + // will do that for us. + rkey, r, err = encoding.DecodeBytesAscending(key, nil) + } else { + rkey, r, err = encoding.DecodeBytesDescending(key, nil) + } + if err != nil { + return nil, nil, err + } + u, err := uuid.FromBytes(r) + return a.NewDUuid(tree.DUuid{UUID: u}), rkey, err + case types.INetFamily: + var r []byte + if dir == encoding.Ascending { + // No need to perform the deep copy since converting to IPAddr below + // will do that for us. 
+ rkey, r, err = encoding.DecodeBytesAscending(key, nil) + } else { + rkey, r, err = encoding.DecodeBytesDescending(key, nil) + } + if err != nil { + return nil, nil, err + } + var ipAddr ipaddr.IPAddr + _, err := ipAddr.FromBuffer(r) + return a.NewDIPAddr(tree.DIPAddr{IPAddr: ipAddr}), rkey, err + case types.OidFamily: + var i int64 + if dir == encoding.Ascending { + rkey, i, err = encoding.DecodeVarintAscending(key) + } else { + rkey, i, err = encoding.DecodeVarintDescending(key) + } + return a.NewDOid(tree.MakeDOid(tree.DInt(i))), rkey, err + case types.EnumFamily: + var r []byte + if dir == encoding.Ascending { + // No need to perform the deep copy since we only need r for a brief + // period of time. + rkey, r, err = encoding.DecodeBytesAscending(key, nil) + } else { + rkey, r, err = encoding.DecodeBytesDescending(key, nil) + } + if err != nil { + return nil, nil, err + } + phys, log, err := tree.GetEnumComponentsFromPhysicalRep(valType, r) + if err != nil { + return nil, nil, err + } + return a.NewDEnum(tree.DEnum{EnumTyp: valType, PhysicalRep: phys, LogicalRep: log}), rkey, nil + default: + return nil, nil, errors.Errorf("unable to decode table key: %s", valType) + } +} + +// Skip skips one value of in a key, returning the remainder of the key. +func Skip(key []byte) (remainingKey []byte, _ error) { + skipLen, err := encoding.PeekLength(key) + if err != nil { + return nil, err + } + return key[skipLen:], nil +} diff --git a/pkg/sql/rowenc/keyside/doc.go b/pkg/sql/rowenc/keyside/doc.go new file mode 100644 index 000000000000..7a9226b9a69f --- /dev/null +++ b/pkg/sql/rowenc/keyside/doc.go @@ -0,0 +1,18 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package keyside contains low-level primitives used to encode/decode SQL +// values into/from KV Keys (see roachpb.Key). +// +// Low-level here means that these primitives do not operate with table or index +// descriptors. +// +// See also: docs/tech-notes/encoding.md. +package keyside diff --git a/pkg/sql/rowenc/keyside/encode.go b/pkg/sql/rowenc/keyside/encode.go new file mode 100644 index 000000000000..5df3b6d1c23d --- /dev/null +++ b/pkg/sql/rowenc/keyside/encode.go @@ -0,0 +1,177 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keyside + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" + "github.com/cockroachdb/errors" +) + +// Encode encodes `val` using key encoding and appends it to `b`, returning the +// new buffer. It is suitable to generate index/lookup keys in KV. 
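+//
+// For example (an illustrative sketch; error handling omitted), an ascending
+// key for an integer datum can be built and read back with:
+//
+//   var a tree.DatumAlloc
+//   key, _ := keyside.Encode(nil /* b */, tree.NewDInt(tree.DInt(42)), encoding.Ascending)
+//   datum, rest, _ := keyside.Decode(&a, types.Int, key, encoding.Ascending)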
+// +// The encoded value is guaranteed to be lexicographically sortable, but not +// guaranteed to be roundtrippable during decoding: some values like decimals +// or collated strings have composite encoding where part of their value lies in +// the value part of the key/value pair. +// +// See also: docs/tech-notes/encoding.md, valueside.Encode(). +func Encode(b []byte, val tree.Datum, dir encoding.Direction) ([]byte, error) { + if (dir != encoding.Ascending) && (dir != encoding.Descending) { + return nil, errors.Errorf("invalid direction: %d", dir) + } + + if val == tree.DNull { + if dir == encoding.Ascending { + return encoding.EncodeNullAscending(b), nil + } + return encoding.EncodeNullDescending(b), nil + } + + switch t := tree.UnwrapDatum(nil, val).(type) { + case *tree.DBool: + var x int64 + if *t { + x = 1 + } else { + x = 0 + } + if dir == encoding.Ascending { + return encoding.EncodeVarintAscending(b, x), nil + } + return encoding.EncodeVarintDescending(b, x), nil + case *tree.DInt: + if dir == encoding.Ascending { + return encoding.EncodeVarintAscending(b, int64(*t)), nil + } + return encoding.EncodeVarintDescending(b, int64(*t)), nil + case *tree.DFloat: + if dir == encoding.Ascending { + return encoding.EncodeFloatAscending(b, float64(*t)), nil + } + return encoding.EncodeFloatDescending(b, float64(*t)), nil + case *tree.DDecimal: + if dir == encoding.Ascending { + return encoding.EncodeDecimalAscending(b, &t.Decimal), nil + } + return encoding.EncodeDecimalDescending(b, &t.Decimal), nil + case *tree.DString: + if dir == encoding.Ascending { + return encoding.EncodeStringAscending(b, string(*t)), nil + } + return encoding.EncodeStringDescending(b, string(*t)), nil + case *tree.DBytes: + if dir == encoding.Ascending { + return encoding.EncodeStringAscending(b, string(*t)), nil + } + return encoding.EncodeStringDescending(b, string(*t)), nil + case *tree.DVoid: + return encoding.EncodeVoidAscendingOrDescending(b), nil + case *tree.DBox2D: + if dir == encoding.Ascending { + return encoding.EncodeBox2DAscending(b, t.CartesianBoundingBox.BoundingBox) + } + return encoding.EncodeBox2DDescending(b, t.CartesianBoundingBox.BoundingBox) + case *tree.DGeography: + so := t.Geography.SpatialObjectRef() + if dir == encoding.Ascending { + return encoding.EncodeGeoAscending(b, t.Geography.SpaceCurveIndex(), so) + } + return encoding.EncodeGeoDescending(b, t.Geography.SpaceCurveIndex(), so) + case *tree.DGeometry: + so := t.Geometry.SpatialObjectRef() + spaceCurveIndex, err := t.Geometry.SpaceCurveIndex() + if err != nil { + return nil, err + } + if dir == encoding.Ascending { + return encoding.EncodeGeoAscending(b, spaceCurveIndex, so) + } + return encoding.EncodeGeoDescending(b, spaceCurveIndex, so) + case *tree.DDate: + if dir == encoding.Ascending { + return encoding.EncodeVarintAscending(b, t.UnixEpochDaysWithOrig()), nil + } + return encoding.EncodeVarintDescending(b, t.UnixEpochDaysWithOrig()), nil + case *tree.DTime: + if dir == encoding.Ascending { + return encoding.EncodeVarintAscending(b, int64(*t)), nil + } + return encoding.EncodeVarintDescending(b, int64(*t)), nil + case *tree.DTimestamp: + if dir == encoding.Ascending { + return encoding.EncodeTimeAscending(b, t.Time), nil + } + return encoding.EncodeTimeDescending(b, t.Time), nil + case *tree.DTimestampTZ: + if dir == encoding.Ascending { + return encoding.EncodeTimeAscending(b, t.Time), nil + } + return encoding.EncodeTimeDescending(b, t.Time), nil + case *tree.DTimeTZ: + if dir == encoding.Ascending { + return 
encoding.EncodeTimeTZAscending(b, t.TimeTZ), nil + } + return encoding.EncodeTimeTZDescending(b, t.TimeTZ), nil + case *tree.DInterval: + if dir == encoding.Ascending { + return encoding.EncodeDurationAscending(b, t.Duration) + } + return encoding.EncodeDurationDescending(b, t.Duration) + case *tree.DUuid: + if dir == encoding.Ascending { + return encoding.EncodeBytesAscending(b, t.GetBytes()), nil + } + return encoding.EncodeBytesDescending(b, t.GetBytes()), nil + case *tree.DIPAddr: + data := t.ToBuffer(nil) + if dir == encoding.Ascending { + return encoding.EncodeBytesAscending(b, data), nil + } + return encoding.EncodeBytesDescending(b, data), nil + case *tree.DTuple: + for _, datum := range t.D { + var err error + b, err = Encode(b, datum, dir) + if err != nil { + return nil, err + } + } + return b, nil + case *tree.DArray: + return encodeArrayKey(b, t, dir) + case *tree.DCollatedString: + if dir == encoding.Ascending { + return encoding.EncodeBytesAscending(b, t.Key), nil + } + return encoding.EncodeBytesDescending(b, t.Key), nil + case *tree.DBitArray: + if dir == encoding.Ascending { + return encoding.EncodeBitArrayAscending(b, t.BitArray), nil + } + return encoding.EncodeBitArrayDescending(b, t.BitArray), nil + case *tree.DOid: + if dir == encoding.Ascending { + return encoding.EncodeVarintAscending(b, int64(t.DInt)), nil + } + return encoding.EncodeVarintDescending(b, int64(t.DInt)), nil + case *tree.DEnum: + if dir == encoding.Ascending { + return encoding.EncodeBytesAscending(b, t.PhysicalRep), nil + } + return encoding.EncodeBytesDescending(b, t.PhysicalRep), nil + case *tree.DJSON: + return nil, unimplemented.NewWithIssue(35706, "unable to encode JSON as a table key") + } + return nil, errors.Errorf("unable to encode table key: %T", val) +} diff --git a/pkg/sql/rowenc/column_type_encoding_test.go b/pkg/sql/rowenc/keyside/keyside_test.go similarity index 61% rename from pkg/sql/rowenc/column_type_encoding_test.go rename to pkg/sql/rowenc/keyside/keyside_test.go index c44e178a0e3f..8744e292a2aa 100644 --- a/pkg/sql/rowenc/column_type_encoding_test.go +++ b/pkg/sql/rowenc/keyside/keyside_test.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Cockroach Authors. +// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package rowenc_test +package keyside_test import ( "bytes" @@ -18,9 +18,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/randgen" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -30,104 +29,18 @@ import ( "github.com/stretchr/testify/require" ) -func genColumnType() gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - columnType := randgen.RandColumnType(genParams.Rng) - return gopter.NewGenResult(columnType, gopter.NoShrinker) - } -} - -func genRandomArrayType() gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - arrType := randgen.RandArrayType(genParams.Rng) - return gopter.NewGenResult(arrType, gopter.NoShrinker) - } -} - -func genDatum() gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - return gopter.NewGenResult(randgen.RandDatum(genParams.Rng, randgen.RandColumnType(genParams.Rng), - false), gopter.NoShrinker) - } -} - -func genDatumWithType(columnType interface{}) gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - datum := randgen.RandDatum(genParams.Rng, columnType.(*types.T), false) - return gopter.NewGenResult(datum, gopter.NoShrinker) - } -} - -func genArrayDatumWithType(arrTyp interface{}) gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - // Mark the array contents to have a 1 in 10 chance of being null. - datum := randgen.RandArray(genParams.Rng, arrTyp.(*types.T), 10) - return gopter.NewGenResult(datum, gopter.NoShrinker) - } -} - -func genEncodingDirection() gopter.Gen { - return func(genParams *gopter.GenParameters) *gopter.GenResult { - return gopter.NewGenResult( - encoding.Direction((genParams.Rng.Int()%int(encoding.Descending))+1), - gopter.NoShrinker) - } -} - -func hasKeyEncoding(typ *types.T) bool { - // Only some types are round-trip key encodable. 
- switch typ.Family() { - case types.JsonFamily, types.CollatedStringFamily, types.TupleFamily, types.DecimalFamily, - types.GeographyFamily, types.GeometryFamily: - return false - case types.ArrayFamily: - return hasKeyEncoding(typ.ArrayContents()) - } - return true -} - -func TestEncodeTableValue(t *testing.T) { - a := &rowenc.DatumAlloc{} - ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 10000 - properties := gopter.NewProperties(parameters) - var scratch []byte - properties.Property("roundtrip", prop.ForAll( - func(d tree.Datum) string { - b, err := rowenc.EncodeTableValue(nil, 0, d, scratch) - if err != nil { - return "error: " + err.Error() - } - newD, leftoverBytes, err := rowenc.DecodeTableValue(a, d.ResolvedType(), b) - if len(leftoverBytes) > 0 { - return "Leftover bytes" - } - if err != nil { - return "error: " + err.Error() - } - if newD.Compare(ctx, d) != 0 { - return "unequal" - } - return "" - }, - genDatum(), - )) - properties.TestingRun(t) -} - -func TestEncodeTableKey(t *testing.T) { - a := &rowenc.DatumAlloc{} +func TestEncodeDecode(t *testing.T) { + a := &tree.DatumAlloc{} ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) parameters := gopter.DefaultTestParameters() parameters.MinSuccessfulTests = 10000 properties := gopter.NewProperties(parameters) roundtripDatum := func(d tree.Datum, dir encoding.Direction) string { - b, err := rowenc.EncodeTableKey(nil, d, dir) + b, err := keyside.Encode(nil, d, dir) if err != nil { return "error: " + err.Error() } - newD, leftoverBytes, err := rowenc.DecodeTableKey(a, d.ResolvedType(), b, dir) + newD, leftoverBytes, err := keyside.Decode(a, d.ResolvedType(), b, dir) if len(leftoverBytes) > 0 { return "Leftover bytes" } @@ -160,11 +73,11 @@ func TestEncodeTableKey(t *testing.T) { generateAndCompareDatums := func(datums []tree.Datum, dir encoding.Direction) string { d1 := datums[0] d2 := datums[1] - b1, err := rowenc.EncodeTableKey(nil, d1, dir) + b1, err := keyside.Encode(nil, d1, dir) if err != nil { return "error: " + err.Error() } - b2, err := rowenc.EncodeTableKey(nil, d2, dir) + b2, err := keyside.Encode(nil, d2, dir) if err != nil { return "error: " + err.Error() } @@ -238,17 +151,17 @@ func TestEncodeTableKey(t *testing.T) { properties.TestingRun(t) } -func TestSkipTableKey(t *testing.T) { +func TestSkip(t *testing.T) { parameters := gopter.DefaultTestParameters() parameters.MinSuccessfulTests = 10000 properties := gopter.NewProperties(parameters) properties.Property("correctness", prop.ForAll( func(d tree.Datum, dir encoding.Direction) string { - b, err := rowenc.EncodeTableKey(nil, d, dir) + b, err := keyside.Encode(nil, d, dir) if err != nil { return "error: " + err.Error() } - res, err := rowenc.SkipTableKey(b) + res, err := keyside.Skip(b) if err != nil { return "error: " + err.Error() } @@ -265,53 +178,19 @@ func TestSkipTableKey(t *testing.T) { properties.TestingRun(t) } -func TestMarshalColumnValueRoundtrip(t *testing.T) { - a := &rowenc.DatumAlloc{} - ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) - parameters := gopter.DefaultTestParameters() - parameters.MinSuccessfulTests = 10000 - properties := gopter.NewProperties(parameters) - - properties.Property("roundtrip", - prop.ForAll( - func(typ *types.T) string { - d, ok := genDatumWithType(typ).Sample() - if !ok { - return "error generating datum" - } - datum := d.(tree.Datum) - value, err := 
rowenc.MarshalColumnTypeValue("testcol", typ, datum) - if err != nil { - return "error marshaling: " + err.Error() - } - outDatum, err := rowenc.UnmarshalColumnValue(a, typ, value) - if err != nil { - return "error unmarshaling: " + err.Error() - } - if datum.Compare(ctx, outDatum) != 0 { - return fmt.Sprintf("datum didn't roundtrip.\ninput: %v\noutput: %v", datum, outDatum) - } - return "" - }, - genColumnType(), - ), - ) - properties.TestingRun(t) -} - -// TestDecodeTableKeyOutOfRangeTimestamp deliberately tests out of range timestamps +// TestDecodeOutOfRangeTimestamp deliberately tests out of range timestamps // can still be decoded from disk. See #46973. -func TestDecodeTableKeyOutOfRangeTimestamp(t *testing.T) { +func TestDecodeOutOfRangeTimestamp(t *testing.T) { for _, d := range []tree.Datum{ &tree.DTimestamp{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, &tree.DTimestampTZ{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, } { for _, dir := range []encoding.Direction{encoding.Ascending, encoding.Descending} { t.Run(fmt.Sprintf("%s/direction:%d", d.String(), dir), func(t *testing.T) { - encoded, err := rowenc.EncodeTableKey([]byte{}, d, dir) + encoded, err := keyside.Encode([]byte{}, d, dir) require.NoError(t, err) - a := &rowenc.DatumAlloc{} - decoded, _, err := rowenc.DecodeTableKey(a, d.ResolvedType(), encoded, dir) + a := &tree.DatumAlloc{} + decoded, _, err := keyside.Decode(a, d.ResolvedType(), encoded, dir) require.NoError(t, err) require.Equal(t, d, decoded) }) @@ -319,41 +198,51 @@ func TestDecodeTableKeyOutOfRangeTimestamp(t *testing.T) { } } -// TestDecodeTableValueOutOfRangeTimestamp deliberately tests out of range timestamps -// can still be decoded from disk. See #46973. -func TestDecodeTableValueOutOfRangeTimestamp(t *testing.T) { - for _, d := range []tree.Datum{ - &tree.DTimestamp{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, - &tree.DTimestampTZ{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, - } { - t.Run(d.String(), func(t *testing.T) { - var b []byte - colID := descpb.ColumnID(1) - encoded, err := rowenc.EncodeTableValue(b, colID, d, []byte{}) - require.NoError(t, err) - a := &rowenc.DatumAlloc{} - decoded, _, err := rowenc.DecodeTableValue(a, d.ResolvedType(), encoded) - require.NoError(t, err) - require.Equal(t, d, decoded) - }) +func genColumnType() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + columnType := randgen.RandColumnType(genParams.Rng) + return gopter.NewGenResult(columnType, gopter.NoShrinker) } } -// This test ensures that decoding a tuple value with a specific, labeled tuple -// type preserves the labels. 
-func TestDecodeTupleValueWithType(t *testing.T) { - tupleType := types.MakeLabeledTuple([]*types.T{types.Int, types.String}, []string{"a", "b"}) - datum := tree.NewDTuple(tupleType, tree.NewDInt(tree.DInt(1)), tree.NewDString("foo")) - buf, err := rowenc.EncodeTableValue(nil, descpb.ColumnID(encoding.NoColumnID), datum, nil) - if err != nil { - t.Fatal(err) +func genRandomArrayType() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + arrType := randgen.RandArrayType(genParams.Rng) + return gopter.NewGenResult(arrType, gopter.NoShrinker) } - da := rowenc.DatumAlloc{} - var decoded tree.Datum - decoded, _, err = rowenc.DecodeTableValue(&da, tupleType, buf) - if err != nil { - t.Fatal(err) +} + +func genDatumWithType(columnType interface{}) gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + datum := randgen.RandDatum(genParams.Rng, columnType.(*types.T), false) + return gopter.NewGenResult(datum, gopter.NoShrinker) + } +} + +func genArrayDatumWithType(arrTyp interface{}) gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + // Mark the array contents to have a 1 in 10 chance of being null. + datum := randgen.RandArray(genParams.Rng, arrTyp.(*types.T), 10) + return gopter.NewGenResult(datum, gopter.NoShrinker) + } +} + +func genEncodingDirection() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + return gopter.NewGenResult( + encoding.Direction((genParams.Rng.Int()%int(encoding.Descending))+1), + gopter.NoShrinker) } +} - require.Equal(t, decoded, datum) +func hasKeyEncoding(typ *types.T) bool { + // Only some types are round-trip key encodable. + switch typ.Family() { + case types.JsonFamily, types.CollatedStringFamily, types.TupleFamily, types.DecimalFamily, + types.GeographyFamily, types.GeometryFamily: + return false + case types.ArrayFamily: + return hasKeyEncoding(typ.ArrayContents()) + } + return true } diff --git a/pkg/sql/rowenc/partition.go b/pkg/sql/rowenc/partition.go index 39211e31711a..530ff85d04e0 100644 --- a/pkg/sql/rowenc/partition.go +++ b/pkg/sql/rowenc/partition.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" @@ -102,7 +103,7 @@ func (t *PartitionTuple) String() string { // DEFAULT) is valid but (1, DEFAULT, 2) is not. Similarly for range // partitioning and MINVALUE/MAXVALUE. 
func DecodePartitionTuple( - a *DatumAlloc, + a *tree.DatumAlloc, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, index catalog.Index, @@ -142,7 +143,7 @@ func DecodePartitionTuple( t.SpecialCount++ } else { var datum tree.Datum - datum, valueEncBuf, err = DecodeTableValue(a, col.GetType(), valueEncBuf) + datum, valueEncBuf, err = valueside.Decode(a, col.GetType(), valueEncBuf) if err != nil { return nil, nil, errors.Wrapf(err, "decoding") } @@ -163,9 +164,8 @@ func DecodePartitionTuple( colMap.Set(index.GetKeyColumnID(i), i) } - indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, index.GetID()) - key, _, err := EncodePartialIndexKey( - tableDesc, index, len(allDatums), colMap, allDatums, indexKeyPrefix) + indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc.GetID(), index.GetID()) + key, _, err := EncodePartialIndexKey(index, len(allDatums), colMap, allDatums, indexKeyPrefix) if err != nil { return nil, nil, err } diff --git a/pkg/sql/rowenc/valueside/BUILD.bazel b/pkg/sql/rowenc/valueside/BUILD.bazel new file mode 100644 index 000000000000..432a1fbcc908 --- /dev/null +++ b/pkg/sql/rowenc/valueside/BUILD.bazel @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "valueside", + srcs = [ + "array.go", + "decode.go", + "doc.go", + "encode.go", + "legacy.go", + "tuple.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside", + visibility = ["//visibility:public"], + deps = [ + "//pkg/geo", + "//pkg/roachpb:with-mocks", + "//pkg/sql/catalog", + "//pkg/sql/catalog/descpb", + "//pkg/sql/lex", + "//pkg/sql/sem/tree", + "//pkg/sql/types", + "//pkg/util/encoding", + "//pkg/util/ipaddr", + "//pkg/util/json", + "//pkg/util/timeutil/pgdate", + "//pkg/util/uuid", + "@com_github_cockroachdb_errors//:errors", + "@com_github_lib_pq//oid", + ], +) + +go_test( + name = "valueside_test", + srcs = [ + "array_test.go", + "valueside_test.go", + ], + embed = [":valueside"], + deps = [ + "//pkg/roachpb:with-mocks", + "//pkg/settings/cluster", + "//pkg/sql/randgen", + "//pkg/sql/sem/tree", + "//pkg/sql/types", + "//pkg/testutils", + "//pkg/util/timeofday", + "//pkg/util/timeutil", + "//pkg/util/timeutil/pgdate", + "@com_github_leanovate_gopter//:gopter", + "@com_github_leanovate_gopter//prop", + "@com_github_stretchr_testify//require", + ], +) diff --git a/pkg/sql/rowenc/valueside/array.go b/pkg/sql/rowenc/valueside/array.go new file mode 100644 index 000000000000..55d361762bed --- /dev/null +++ b/pkg/sql/rowenc/valueside/array.go @@ -0,0 +1,313 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/json" + "github.com/cockroachdb/errors" +) + +// encodeArray produces the value encoding for an array. 
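+// The layout is, in order: a header byte (number of dimensions plus a
+// has-NULLs flag), a value tag carrying the element encoding type, the number
+// of elements as a uvarint, an optional NULL bitmap, and the encoded elements
+// themselves (see encodeArrayHeader below).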
+func encodeArray(d *tree.DArray, scratch []byte) ([]byte, error) { + if err := d.Validate(); err != nil { + return scratch, err + } + scratch = scratch[0:0] + elementType, err := DatumTypeToArrayElementEncodingType(d.ParamTyp) + + if err != nil { + return nil, err + } + header := arrayHeader{ + hasNulls: d.HasNulls, + // TODO(justin): support multiple dimensions. + numDimensions: 1, + elementType: elementType, + length: uint64(d.Len()), + // We don't encode the NULL bitmap in this function because we do it in lockstep with the + // main data. + } + scratch, err = encodeArrayHeader(header, scratch) + if err != nil { + return nil, err + } + nullBitmapStart := len(scratch) + if d.HasNulls { + for i := 0; i < numBytesInBitArray(d.Len()); i++ { + scratch = append(scratch, 0) + } + } + for i, e := range d.Array { + var err error + if d.HasNulls && e == tree.DNull { + setBit(scratch[nullBitmapStart:], i) + } else { + scratch, err = encodeArrayElement(scratch, e) + if err != nil { + return nil, err + } + } + } + return scratch, nil +} + +// decodeArray decodes the value encoding for an array. +func decodeArray(a *tree.DatumAlloc, elementType *types.T, b []byte) (tree.Datum, []byte, error) { + header, b, err := decodeArrayHeader(b) + if err != nil { + return nil, b, err + } + result := tree.DArray{ + Array: make(tree.Datums, header.length), + ParamTyp: elementType, + } + var val tree.Datum + for i := uint64(0); i < header.length; i++ { + if header.isNull(i) { + result.Array[i] = tree.DNull + result.HasNulls = true + } else { + result.HasNonNulls = true + val, b, err = DecodeUntaggedDatum(a, elementType, b) + if err != nil { + return nil, b, err + } + result.Array[i] = val + } + } + return &result, b, nil +} + +// arrayHeader is a parameter passing struct between +// encodeArray/decodeArray and encodeArrayHeader/decodeArrayHeader. +// +// It describes the important properties of an array that are useful +// for an efficient value encoding. +type arrayHeader struct { + // hasNulls is set if the array contains any NULL values. + hasNulls bool + // numDimensions is the number of dimensions in the array. + numDimensions int + // elementType is the encoding type of the array elements. + elementType encoding.Type + // length is the total number of elements encoded. + length uint64 + // nullBitmap is a compact representation of which array indexes + // have NULL values. + nullBitmap []byte +} + +// isNull returns true iff the array element at the given index is +// NULL. +func (h arrayHeader) isNull(i uint64) bool { + return h.hasNulls && ((h.nullBitmap[i/8]>>(i%8))&1) == 1 +} + +// setBit sets the bit in the given bitmap at index idx to 1. It's used to +// construct the NULL bitmap within arrays. +func setBit(bitmap []byte, idx int) { + bitmap[idx/8] = bitmap[idx/8] | (1 << uint(idx%8)) +} + +// numBytesInBitArray returns the minimum number of bytes necessary to +// store the given number of bits. +func numBytesInBitArray(numBits int) int { + return (numBits + 7) / 8 +} + +// makeBitVec carves a bitmap (byte array intended to store bits) for +// the given number of bits out of its first argument. It returns the +// remainder of the first argument after the bitmap has been reserved +// into it. 
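+// For example, makeBitVec(src, 10) reserves two bytes for the bitmap and
+// returns (src[2:], src[:2]).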
+func makeBitVec(src []byte, length int) (b, bitVec []byte) { + nullBitmapNumBytes := numBytesInBitArray(length) + return src[nullBitmapNumBytes:], src[:nullBitmapNumBytes] +} + +const hasNullFlag = 1 << 4 + +// encodeArrayHeader is used by encodeArray to encode the header +// at the beginning of the value encoding. +func encodeArrayHeader(h arrayHeader, buf []byte) ([]byte, error) { + // The header byte we append here is formatted as follows: + // * The low 4 bits encode the number of dimensions in the array. + // * The high 4 bits are flags, with the lowest representing whether the array + // contains NULLs, and the rest reserved. + headerByte := h.numDimensions + if h.hasNulls { + headerByte = headerByte | hasNullFlag + } + buf = append(buf, byte(headerByte)) + buf = encoding.EncodeValueTag(buf, encoding.NoColumnID, h.elementType) + buf = encoding.EncodeNonsortingUvarint(buf, h.length) + return buf, nil +} + +// decodeArrayHeader is used by decodeArray to decode the header at +// the beginning of the value encoding. +func decodeArrayHeader(b []byte) (arrayHeader, []byte, error) { + if len(b) < 2 { + return arrayHeader{}, b, errors.Errorf("buffer too small") + } + hasNulls := b[0]&hasNullFlag != 0 + b = b[1:] + _, dataOffset, _, encType, err := encoding.DecodeValueTag(b) + if err != nil { + return arrayHeader{}, b, err + } + b = b[dataOffset:] + b, _, length, err := encoding.DecodeNonsortingUvarint(b) + if err != nil { + return arrayHeader{}, b, err + } + nullBitmap := []byte(nil) + if hasNulls { + b, nullBitmap = makeBitVec(b, int(length)) + } + return arrayHeader{ + hasNulls: hasNulls, + // TODO(justin): support multiple dimensions. + numDimensions: 1, + elementType: encType, + length: length, + nullBitmap: nullBitmap, + }, b, nil +} + +// DatumTypeToArrayElementEncodingType decides an encoding type to +// place in the array header given a datum type. The element encoding +// type is then used to encode/decode array elements. +func DatumTypeToArrayElementEncodingType(t *types.T) (encoding.Type, error) { + switch t.Family() { + case types.IntFamily: + return encoding.Int, nil + case types.OidFamily: + return encoding.Int, nil + case types.FloatFamily: + return encoding.Float, nil + case types.Box2DFamily: + return encoding.Box2D, nil + case types.GeometryFamily: + return encoding.Geo, nil + case types.GeographyFamily: + return encoding.Geo, nil + case types.DecimalFamily: + return encoding.Decimal, nil + case types.BytesFamily, types.StringFamily, types.CollatedStringFamily, types.EnumFamily: + return encoding.Bytes, nil + case types.TimestampFamily, types.TimestampTZFamily: + return encoding.Time, nil + // Note: types.Date was incorrectly mapped to encoding.Time when arrays were + // first introduced. If any 1.1 users used date arrays, they would have been + // persisted with incorrect elementType values. 
+ case types.DateFamily, types.TimeFamily: + return encoding.Int, nil + case types.TimeTZFamily: + return encoding.TimeTZ, nil + case types.IntervalFamily: + return encoding.Duration, nil + case types.BoolFamily: + return encoding.True, nil + case types.BitFamily: + return encoding.BitArray, nil + case types.UuidFamily: + return encoding.UUID, nil + case types.INetFamily: + return encoding.IPAddr, nil + case types.JsonFamily: + return encoding.JSON, nil + case types.TupleFamily: + return encoding.Tuple, nil + default: + return 0, errors.AssertionFailedf("no known encoding type for %s", t) + } +} +func checkElementType(paramType *types.T, elemType *types.T) error { + if paramType.Family() != elemType.Family() { + return errors.Errorf("type of array contents %s doesn't match column type %s", + paramType, elemType.Family()) + } + if paramType.Family() == types.CollatedStringFamily { + if paramType.Locale() != elemType.Locale() { + return errors.Errorf("locale of collated string array being inserted (%s) doesn't match locale of column type (%s)", + paramType.Locale(), elemType.Locale()) + } + } + return nil +} + +// encodeArrayElement appends the encoded form of one array element to +// the target byte buffer. +func encodeArrayElement(b []byte, d tree.Datum) ([]byte, error) { + switch t := tree.UnwrapDatum(nil, d).(type) { + case *tree.DInt: + return encoding.EncodeUntaggedIntValue(b, int64(*t)), nil + case *tree.DString: + bytes := []byte(*t) + b = encoding.EncodeUntaggedBytesValue(b, bytes) + return b, nil + case *tree.DBytes: + bytes := []byte(*t) + b = encoding.EncodeUntaggedBytesValue(b, bytes) + return b, nil + case *tree.DBitArray: + return encoding.EncodeUntaggedBitArrayValue(b, t.BitArray), nil + case *tree.DFloat: + return encoding.EncodeUntaggedFloatValue(b, float64(*t)), nil + case *tree.DBool: + return encoding.EncodeBoolValue(b, encoding.NoColumnID, bool(*t)), nil + case *tree.DDecimal: + return encoding.EncodeUntaggedDecimalValue(b, &t.Decimal), nil + case *tree.DDate: + return encoding.EncodeUntaggedIntValue(b, t.UnixEpochDaysWithOrig()), nil + case *tree.DBox2D: + return encoding.EncodeUntaggedBox2DValue(b, t.CartesianBoundingBox.BoundingBox) + case *tree.DGeography: + return encoding.EncodeUntaggedGeoValue(b, t.SpatialObjectRef()) + case *tree.DGeometry: + return encoding.EncodeUntaggedGeoValue(b, t.SpatialObjectRef()) + case *tree.DTime: + return encoding.EncodeUntaggedIntValue(b, int64(*t)), nil + case *tree.DTimeTZ: + return encoding.EncodeUntaggedTimeTZValue(b, t.TimeTZ), nil + case *tree.DTimestamp: + return encoding.EncodeUntaggedTimeValue(b, t.Time), nil + case *tree.DTimestampTZ: + return encoding.EncodeUntaggedTimeValue(b, t.Time), nil + case *tree.DInterval: + return encoding.EncodeUntaggedDurationValue(b, t.Duration), nil + case *tree.DUuid: + return encoding.EncodeUntaggedUUIDValue(b, t.UUID), nil + case *tree.DIPAddr: + return encoding.EncodeUntaggedIPAddrValue(b, t.IPAddr), nil + case *tree.DOid: + return encoding.EncodeUntaggedIntValue(b, int64(t.DInt)), nil + case *tree.DCollatedString: + return encoding.EncodeUntaggedBytesValue(b, []byte(t.Contents)), nil + case *tree.DOidWrapper: + return encodeArrayElement(b, t.Wrapped) + case *tree.DEnum: + return encoding.EncodeUntaggedBytesValue(b, t.PhysicalRep), nil + case *tree.DJSON: + encoded, err := json.EncodeJSON(nil, t.JSON) + if err != nil { + return nil, err + } + return encoding.EncodeUntaggedBytesValue(b, encoded), nil + case *tree.DTuple: + return encodeUntaggedTuple(t, b, encoding.NoColumnID, nil) + 
default: + return nil, errors.Errorf("don't know how to encode %s (%T)", d, d) + } +} diff --git a/pkg/sql/rowenc/valueside/array_test.go b/pkg/sql/rowenc/valueside/array_test.go new file mode 100644 index 000000000000..7704c961ed6a --- /dev/null +++ b/pkg/sql/rowenc/valueside/array_test.go @@ -0,0 +1,152 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "bytes" + "testing" + + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" +) + +type arrayEncodingTest struct { + name string + datum tree.DArray + encoding []byte +} + +func TestArrayEncoding(t *testing.T) { + tests := []arrayEncodingTest{ + { + "empty int array", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{}, + }, + []byte{1, 3, 0}, + }, { + "single int array", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{tree.NewDInt(1)}, + }, + []byte{1, 3, 1, 2}, + }, { + "multiple int array", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{tree.NewDInt(1), tree.NewDInt(2), tree.NewDInt(3)}, + }, + []byte{1, 3, 3, 2, 4, 6}, + }, { + "string array", + tree.DArray{ + ParamTyp: types.String, + Array: tree.Datums{tree.NewDString("foo"), tree.NewDString("bar"), tree.NewDString("baz")}, + }, + []byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122}, + }, { + "name array", + tree.DArray{ + ParamTyp: types.Name, + Array: tree.Datums{tree.NewDName("foo"), tree.NewDName("bar"), tree.NewDName("baz")}, + }, + []byte{1, 6, 3, 3, 102, 111, 111, 3, 98, 97, 114, 3, 98, 97, 122}, + }, + { + "bool array", + tree.DArray{ + ParamTyp: types.Bool, + Array: tree.Datums{tree.MakeDBool(true), tree.MakeDBool(false)}, + }, + []byte{1, 10, 2, 10, 11}, + }, { + "array containing a single null", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{tree.DNull}, + HasNulls: true, + }, + []byte{17, 3, 1, 1}, + }, { + "array containing multiple nulls", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{tree.NewDInt(1), tree.DNull, tree.DNull}, + HasNulls: true, + }, + []byte{17, 3, 3, 6, 2}, + }, { + "array whose NULL bitmap spans exactly one byte", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{ + tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3), + tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6), + }, + HasNulls: true, + }, + []byte{17, 3, 8, 6, 2, 4, 6, 8, 10, 12}, + }, { + "array whose NULL bitmap spans more than one byte", + tree.DArray{ + ParamTyp: types.Int, + Array: tree.Datums{ + tree.NewDInt(1), tree.DNull, tree.DNull, tree.NewDInt(2), tree.NewDInt(3), + tree.NewDInt(4), tree.NewDInt(5), tree.NewDInt(6), tree.DNull, + }, + HasNulls: true, + }, + []byte{17, 3, 9, 6, 1, 2, 4, 6, 8, 10, 12}, + }, + } + + for _, test := range tests { + t.Run("encode "+test.name, func(t *testing.T) { + enc, err := encodeArray(&test.datum, nil) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(enc, test.encoding) { + t.Fatalf("expected %s to encode to %v, got %v", test.datum.String(), test.encoding, enc) + } + }) + + t.Run("decode "+test.name, func(t *testing.T) { + d, _, err := decodeArray(&tree.DatumAlloc{}, 
test.datum.ParamTyp, test.encoding) + hasNulls := d.(*tree.DArray).HasNulls + if test.datum.HasNulls != hasNulls { + t.Fatalf("expected %v to have HasNulls=%t, got %t", test.encoding, test.datum.HasNulls, hasNulls) + } + if err != nil { + t.Fatal(err) + } + evalContext := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) + if d.Compare(evalContext, &test.datum) != 0 { + t.Fatalf("expected %v to decode to %s, got %s", test.encoding, test.datum.String(), d.String()) + } + }) + } +} + +func BenchmarkArrayEncoding(b *testing.B) { + ary := tree.DArray{ParamTyp: types.Int, Array: tree.Datums{}} + for i := 0; i < 10000; i++ { + _ = ary.Append(tree.NewDInt(1)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = encodeArray(&ary, nil) + } +} diff --git a/pkg/sql/rowenc/valueside/decode.go b/pkg/sql/rowenc/valueside/decode.go new file mode 100644 index 000000000000..5e2214a4c3be --- /dev/null +++ b/pkg/sql/rowenc/valueside/decode.go @@ -0,0 +1,275 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/json" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" + "github.com/cockroachdb/errors" +) + +// Decode decodes a value encoded by Encode. +func Decode( + a *tree.DatumAlloc, valType *types.T, b []byte, +) (_ tree.Datum, remaining []byte, _ error) { + _, dataOffset, _, typ, err := encoding.DecodeValueTag(b) + if err != nil { + return nil, b, err + } + // NULL is special because it is a valid value for any type. + if typ == encoding.Null { + return tree.DNull, b[dataOffset:], nil + } + // Bool is special because the value is stored in the value tag. + if valType.Family() != types.BoolFamily { + b = b[dataOffset:] + } + return DecodeUntaggedDatum(a, valType, b) +} + +// DecodeUntaggedDatum is used to decode a Datum whose type is known, +// and which doesn't have a value tag (either due to it having been +// consumed already or not having one in the first place). +// +// This is used to decode datums encoded using value encoding. +// +// If t is types.Bool, the value tag must be present, as its value is encoded in +// the tag directly. 
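+//
+// For example (an illustrative sketch, where a is a *tree.DatumAlloc and buf
+// holds the untagged bytes of an INT value):
+//
+//   d, rest, err := valueside.DecodeUntaggedDatum(a, types.Int, buf)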
+func DecodeUntaggedDatum( + a *tree.DatumAlloc, t *types.T, buf []byte, +) (_ tree.Datum, remaining []byte, _ error) { + switch t.Family() { + case types.IntFamily: + b, i, err := encoding.DecodeUntaggedIntValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDInt(tree.DInt(i)), b, nil + case types.StringFamily: + b, data, err := encoding.DecodeUntaggedBytesValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDString(tree.DString(data)), b, nil + case types.CollatedStringFamily: + b, data, err := encoding.DecodeUntaggedBytesValue(buf) + if err != nil { + return nil, b, err + } + d, err := a.NewDCollatedString(string(data), t.Locale()) + return d, b, err + case types.BitFamily: + b, data, err := encoding.DecodeUntaggedBitArrayValue(buf) + return a.NewDBitArray(tree.DBitArray{BitArray: data}), b, err + case types.BoolFamily: + // A boolean's value is encoded in its tag directly, so we don't have an + // "Untagged" version of this function. + b, data, err := encoding.DecodeBoolValue(buf) + if err != nil { + return nil, b, err + } + return tree.MakeDBool(tree.DBool(data)), b, nil + case types.FloatFamily: + b, data, err := encoding.DecodeUntaggedFloatValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDFloat(tree.DFloat(data)), b, nil + case types.DecimalFamily: + b, data, err := encoding.DecodeUntaggedDecimalValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDDecimal(tree.DDecimal{Decimal: data}), b, nil + case types.BytesFamily: + b, data, err := encoding.DecodeUntaggedBytesValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDBytes(tree.DBytes(data)), b, nil + case types.DateFamily: + b, data, err := encoding.DecodeUntaggedIntValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(data))), b, nil + case types.Box2DFamily: + b, data, err := encoding.DecodeUntaggedBox2DValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDBox2D(tree.DBox2D{ + CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: data}, + }), b, nil + case types.GeographyFamily: + g := a.NewDGeographyEmpty() + so := g.Geography.SpatialObjectRef() + b, err := encoding.DecodeUntaggedGeoValue(buf, so) + a.DoneInitNewDGeo(so) + if err != nil { + return nil, b, err + } + return g, b, nil + case types.GeometryFamily: + g := a.NewDGeometryEmpty() + so := g.Geometry.SpatialObjectRef() + b, err := encoding.DecodeUntaggedGeoValue(buf, so) + a.DoneInitNewDGeo(so) + if err != nil { + return nil, b, err + } + return g, b, nil + case types.TimeFamily: + b, data, err := encoding.DecodeUntaggedIntValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDTime(tree.DTime(data)), b, nil + case types.TimeTZFamily: + b, data, err := encoding.DecodeUntaggedTimeTZValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: data}), b, nil + case types.TimestampFamily: + b, data, err := encoding.DecodeUntaggedTimeValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDTimestamp(tree.DTimestamp{Time: data}), b, nil + case types.TimestampTZFamily: + b, data, err := encoding.DecodeUntaggedTimeValue(buf) + if err != nil { + return nil, b, err + } + return a.NewDTimestampTZ(tree.DTimestampTZ{Time: data}), b, nil + case types.IntervalFamily: + b, data, err := encoding.DecodeUntaggedDurationValue(buf) + return a.NewDInterval(tree.DInterval{Duration: data}), b, err + case types.UuidFamily: + b, data, err := 
encoding.DecodeUntaggedUUIDValue(buf) + return a.NewDUuid(tree.DUuid{UUID: data}), b, err + case types.INetFamily: + b, data, err := encoding.DecodeUntaggedIPAddrValue(buf) + return a.NewDIPAddr(tree.DIPAddr{IPAddr: data}), b, err + case types.JsonFamily: + b, data, err := encoding.DecodeUntaggedBytesValue(buf) + if err != nil { + return nil, b, err + } + // We copy the byte buffer here, because the JSON decoding is lazy, and we + // do not want to hang on to the backing byte buffer, which might be an + // entire KV batch. + cpy := make([]byte, len(data)) + copy(cpy, data) + j, err := json.FromEncoding(cpy) + if err != nil { + return nil, b, err + } + return a.NewDJSON(tree.DJSON{JSON: j}), b, nil + case types.OidFamily: + b, data, err := encoding.DecodeUntaggedIntValue(buf) + return a.NewDOid(tree.MakeDOid(tree.DInt(data))), b, err + case types.ArrayFamily: + // Skip the encoded data length. + b, _, _, err := encoding.DecodeNonsortingUvarint(buf) + if err != nil { + return nil, nil, err + } + return decodeArray(a, t.ArrayContents(), b) + case types.TupleFamily: + return decodeTuple(a, t, buf) + case types.EnumFamily: + b, data, err := encoding.DecodeUntaggedBytesValue(buf) + if err != nil { + return nil, b, err + } + phys, log, err := tree.GetEnumComponentsFromPhysicalRep(t, data) + if err != nil { + return nil, nil, err + } + return a.NewDEnum(tree.DEnum{EnumTyp: t, PhysicalRep: phys, LogicalRep: log}), b, nil + case types.VoidFamily: + return a.NewDVoid(), buf, nil + default: + return nil, buf, errors.Errorf("couldn't decode type %s", t) + } +} + +// Decoder is a helper for decoding rows that contain multiple encoded values. +// +// This helper is intended for non-performance-critical uses (like processing +// rangefeed KVs). The query execution engines have more specialized +// implementations for performance reasons. +type Decoder struct { + colIdxMap catalog.TableColMap + types []*types.T +} + +// MakeDecoder creates a Decoder for the given columns. +// +// Once created, the Decoder is immutable. +func MakeDecoder(cols []catalog.Column) Decoder { + var d Decoder + d.types = make([]*types.T, len(cols)) + for i, col := range cols { + d.colIdxMap.Set(col.GetID(), i) + d.types[i] = col.GetType() + } + return d +} + +// Decode processes multiple encoded values. Values for the columns used to +// create the decoder are populated in the corresponding positions in the Datums +// slice. +// +// If a given column is not encoded, the datum will be DNull. +// +// Values for any other column IDs are ignored. +// +// Decode can be called concurrently on the same Decoder. +func (d *Decoder) Decode(a *tree.DatumAlloc, bytes []byte) (tree.Datums, error) { + datums := make(tree.Datums, len(d.types)) + for i := range datums { + datums[i] = tree.DNull + } + + var lastColID descpb.ColumnID + for len(bytes) > 0 { + _, dataOffset, colIDDiff, typ, err := encoding.DecodeValueTag(bytes) + if err != nil { + return nil, err + } + colID := lastColID + descpb.ColumnID(colIDDiff) + lastColID = colID + idx, ok := d.colIdxMap.Get(colID) + if !ok { + // This column wasn't requested, so read its length and skip it. 
+ l, err := encoding.PeekValueLengthWithOffsetsAndType(bytes, dataOffset, typ) + if err != nil { + return nil, err + } + bytes = bytes[l:] + continue + } + datums[idx], bytes, err = Decode(a, d.types[idx], bytes) + if err != nil { + return nil, err + } + } + return datums, nil +} diff --git a/pkg/sql/rowenc/valueside/doc.go b/pkg/sql/rowenc/valueside/doc.go new file mode 100644 index 000000000000..77a6bb5e9ee0 --- /dev/null +++ b/pkg/sql/rowenc/valueside/doc.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package valueside contains low-level primitives used to encode/decode SQL +// values into/from KV Values (see roachpb.Value). +// +// Low-level here means that these primitives do not operate with table or index +// descriptors. +// +// There are two separate schemes for encoding values: +// +// - version 1 (legacy): the original encoding, which supported at most one SQL +// value (column) per roachpb.Value. It is still used for old table +// descriptors that went through many upgrades, and for some system tables. +// Primitives related to this version contain the name `Legacy`. +// +// - version 2 (column families): the current encoding which supports multiple +// SQL values (columns) per roachpb.Value. +// +// See also: docs/tech-notes/encoding.md. +package valueside diff --git a/pkg/sql/rowenc/valueside/encode.go b/pkg/sql/rowenc/valueside/encode.go new file mode 100644 index 000000000000..d673ba3d01a9 --- /dev/null +++ b/pkg/sql/rowenc/valueside/encode.go @@ -0,0 +1,120 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/util/json" + "github.com/cockroachdb/errors" +) + +// Encode encodes `val` using value encoding and appends it to `appendTo`, +// returning the new buffer. +// +// This is suitable for generating the value part of individual columns +// in a column family. +// +// The encoded value is guaranteed to round trip and decode exactly to its +// input, but is not lexicographically sortable. +// +// The scratch buffer is optional and is used as a temporary buffer for certain +// datum types (JSON, arrays, tuples). +// +// See also: docs/tech-notes/encoding.md, keyside.Encode(). 
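+//
+// For example (an illustrative sketch; error handling omitted), two columns
+// with IDs 1 and 3 can be encoded into the same buffer by passing the delta
+// from the previously encoded column ID:
+//
+//   buf, _ = valueside.Encode(buf, valueside.MakeColumnIDDelta(0, 1), d1, nil /* scratch */)
+//   buf, _ = valueside.Encode(buf, valueside.MakeColumnIDDelta(1, 3), d2, nil /* scratch */)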
+func Encode(appendTo []byte, colID ColumnIDDelta, val tree.Datum, scratch []byte) ([]byte, error) { + if val == tree.DNull { + return encoding.EncodeNullValue(appendTo, uint32(colID)), nil + } + switch t := tree.UnwrapDatum(nil, val).(type) { + case *tree.DBitArray: + return encoding.EncodeBitArrayValue(appendTo, uint32(colID), t.BitArray), nil + case *tree.DBool: + return encoding.EncodeBoolValue(appendTo, uint32(colID), bool(*t)), nil + case *tree.DInt: + return encoding.EncodeIntValue(appendTo, uint32(colID), int64(*t)), nil + case *tree.DFloat: + return encoding.EncodeFloatValue(appendTo, uint32(colID), float64(*t)), nil + case *tree.DDecimal: + return encoding.EncodeDecimalValue(appendTo, uint32(colID), &t.Decimal), nil + case *tree.DString: + return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(*t)), nil + case *tree.DBytes: + return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(*t)), nil + case *tree.DDate: + return encoding.EncodeIntValue(appendTo, uint32(colID), t.UnixEpochDaysWithOrig()), nil + case *tree.DBox2D: + return encoding.EncodeBox2DValue(appendTo, uint32(colID), t.CartesianBoundingBox.BoundingBox) + case *tree.DGeography: + return encoding.EncodeGeoValue(appendTo, uint32(colID), t.SpatialObjectRef()) + case *tree.DGeometry: + return encoding.EncodeGeoValue(appendTo, uint32(colID), t.SpatialObjectRef()) + case *tree.DTime: + return encoding.EncodeIntValue(appendTo, uint32(colID), int64(*t)), nil + case *tree.DTimeTZ: + return encoding.EncodeTimeTZValue(appendTo, uint32(colID), t.TimeTZ), nil + case *tree.DTimestamp: + return encoding.EncodeTimeValue(appendTo, uint32(colID), t.Time), nil + case *tree.DTimestampTZ: + return encoding.EncodeTimeValue(appendTo, uint32(colID), t.Time), nil + case *tree.DInterval: + return encoding.EncodeDurationValue(appendTo, uint32(colID), t.Duration), nil + case *tree.DUuid: + return encoding.EncodeUUIDValue(appendTo, uint32(colID), t.UUID), nil + case *tree.DIPAddr: + return encoding.EncodeIPAddrValue(appendTo, uint32(colID), t.IPAddr), nil + case *tree.DJSON: + encoded, err := json.EncodeJSON(scratch, t.JSON) + if err != nil { + return nil, err + } + return encoding.EncodeJSONValue(appendTo, uint32(colID), encoded), nil + case *tree.DArray: + a, err := encodeArray(t, scratch) + if err != nil { + return nil, err + } + return encoding.EncodeArrayValue(appendTo, uint32(colID), a), nil + case *tree.DTuple: + return encodeTuple(t, appendTo, uint32(colID), scratch) + case *tree.DCollatedString: + return encoding.EncodeBytesValue(appendTo, uint32(colID), []byte(t.Contents)), nil + case *tree.DOid: + return encoding.EncodeIntValue(appendTo, uint32(colID), int64(t.DInt)), nil + case *tree.DEnum: + return encoding.EncodeBytesValue(appendTo, uint32(colID), t.PhysicalRep), nil + case *tree.DVoid: + return encoding.EncodeVoidValue(appendTo, uint32(colID)), nil + default: + return nil, errors.Errorf("unable to encode table value: %T", t) + } +} + +// ColumnIDDelta is the difference between two descpb.ColumnIDs. When multiple +// columns are encoded in a single value, the difference relative to the +// previous column ID is encoded for each column (to minimize space usage). +type ColumnIDDelta uint32 + +// NoColumnID is a sentinel used when we aren't encoding a specific column ID. +// This is used when we use value encodings not to write KV Values but for other +// purposes, for example transferring a value over DistSQL (in the row engine).
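For the multi-column (column family) case, the deltas are chained. A rough sketch, reusing the imports from the previous example and the MakeColumnIDDelta helper defined just below (illustrative only):
```
// Pack columns with IDs 1 and 3 into a single value; each delta is
// relative to the previously encoded column ID.
var buf []byte
buf, err := valueside.Encode(buf, valueside.MakeColumnIDDelta(0, 1), tree.NewDInt(tree.DInt(10)), nil)
if err != nil {
	panic(err)
}
buf, err = valueside.Encode(buf, valueside.MakeColumnIDDelta(1, 3), tree.NewDString("x"), nil)
if err != nil {
	panic(err)
}

// Decoding in the same order with the right types recovers both datums; the
// Decoder helper above does this bookkeeping automatically when a slice of
// catalog.Column is available.
var alloc tree.DatumAlloc
d1, rest, err := valueside.Decode(&alloc, types.Int, buf)
if err != nil {
	panic(err)
}
d2, _, err := valueside.Decode(&alloc, types.String, rest)
if err != nil {
	panic(err)
}
_, _ = d1, d2
```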
+const NoColumnID = ColumnIDDelta(encoding.NoColumnID) + +// MakeColumnIDDelta creates the ColumnIDDelta for the difference between the +// given columns in the same value. For the first column in the value, +// `previous` should be zero / NoColumnID. +func MakeColumnIDDelta(previous, current descpb.ColumnID) ColumnIDDelta { + if previous > current { + panic(errors.AssertionFailedf("cannot write column id %d after %d", current, previous)) + } + return ColumnIDDelta(current - previous) +} diff --git a/pkg/sql/rowenc/valueside/legacy.go b/pkg/sql/rowenc/valueside/legacy.go new file mode 100644 index 000000000000..1fa6f04d8d60 --- /dev/null +++ b/pkg/sql/rowenc/valueside/legacy.go @@ -0,0 +1,363 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "github.com/cockroachdb/cockroach/pkg/geo" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/lex" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/ipaddr" + "github.com/cockroachdb/cockroach/pkg/util/json" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/errors" + "github.com/lib/pq/oid" +) + +// MarshalLegacy produces the value encoding of the given datum (constrained by +// the given column type) into a roachpb.Value, using the legacy version 1 +// encoding (see docs/tech-notes/encoding.md). +// +// This encoding is used when the table format does not use column +// families, such as pre-2.0 tables and some system tables. +// +// If val's type is incompatible with colType, or if colType is not yet +// implemented by this function, an error is returned.
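The legacy path is easiest to see as a round trip as well. A short sketch (same imports as the earlier example, mirroring TestLegacy and TestLegacyRoundtrip below; not part of the patch):
```
// Marshal a datum into a roachpb.Value with the legacy (version 1)
// encoding, then unmarshal it back using the column type.
datum := tree.NewDString("hello")
value, err := valueside.MarshalLegacy(types.String, datum)
if err != nil {
	panic(err)
}
var alloc tree.DatumAlloc
out, err := valueside.UnmarshalLegacy(&alloc, types.String, value)
if err != nil {
	panic(err)
}
fmt.Println(out) // prints the decoded datum: 'hello'
```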
+func MarshalLegacy(colType *types.T, val tree.Datum) (roachpb.Value, error) { + var r roachpb.Value + + if val == tree.DNull { + return r, nil + } + + switch colType.Family() { + case types.BitFamily: + if v, ok := val.(*tree.DBitArray); ok { + r.SetBitArray(v.BitArray) + return r, nil + } + case types.BoolFamily: + if v, ok := val.(*tree.DBool); ok { + r.SetBool(bool(*v)) + return r, nil + } + case types.IntFamily: + if v, ok := tree.AsDInt(val); ok { + r.SetInt(int64(v)) + return r, nil + } + case types.FloatFamily: + if v, ok := val.(*tree.DFloat); ok { + r.SetFloat(float64(*v)) + return r, nil + } + case types.DecimalFamily: + if v, ok := val.(*tree.DDecimal); ok { + err := r.SetDecimal(&v.Decimal) + return r, err + } + case types.StringFamily: + if v, ok := tree.AsDString(val); ok { + r.SetString(string(v)) + return r, nil + } + case types.BytesFamily: + if v, ok := val.(*tree.DBytes); ok { + r.SetString(string(*v)) + return r, nil + } + case types.DateFamily: + if v, ok := val.(*tree.DDate); ok { + r.SetInt(v.UnixEpochDaysWithOrig()) + return r, nil + } + case types.Box2DFamily: + if v, ok := val.(*tree.DBox2D); ok { + r.SetBox2D(v.CartesianBoundingBox.BoundingBox) + return r, nil + } + case types.GeographyFamily: + if v, ok := val.(*tree.DGeography); ok { + err := r.SetGeo(v.SpatialObject()) + return r, err + } + case types.GeometryFamily: + if v, ok := val.(*tree.DGeometry); ok { + err := r.SetGeo(v.SpatialObject()) + return r, err + } + case types.TimeFamily: + if v, ok := val.(*tree.DTime); ok { + r.SetInt(int64(*v)) + return r, nil + } + case types.TimeTZFamily: + if v, ok := val.(*tree.DTimeTZ); ok { + r.SetTimeTZ(v.TimeTZ) + return r, nil + } + case types.TimestampFamily: + if v, ok := val.(*tree.DTimestamp); ok { + r.SetTime(v.Time) + return r, nil + } + case types.TimestampTZFamily: + if v, ok := val.(*tree.DTimestampTZ); ok { + r.SetTime(v.Time) + return r, nil + } + case types.IntervalFamily: + if v, ok := val.(*tree.DInterval); ok { + err := r.SetDuration(v.Duration) + return r, err + } + case types.UuidFamily: + if v, ok := val.(*tree.DUuid); ok { + r.SetBytes(v.GetBytes()) + return r, nil + } + case types.INetFamily: + if v, ok := val.(*tree.DIPAddr); ok { + data := v.ToBuffer(nil) + r.SetBytes(data) + return r, nil + } + case types.JsonFamily: + if v, ok := val.(*tree.DJSON); ok { + data, err := json.EncodeJSON(nil, v.JSON) + if err != nil { + return r, err + } + r.SetBytes(data) + return r, nil + } + case types.ArrayFamily: + if v, ok := val.(*tree.DArray); ok { + if err := checkElementType(v.ParamTyp, colType.ArrayContents()); err != nil { + return r, err + } + b, err := encodeArray(v, nil) + if err != nil { + return r, err + } + r.SetBytes(b) + return r, nil + } + case types.CollatedStringFamily: + if v, ok := val.(*tree.DCollatedString); ok { + if lex.LocaleNamesAreEqual(v.Locale, colType.Locale()) { + r.SetString(v.Contents) + return r, nil + } + // We can't fail here with a locale mismatch, this is a sign + // that the proper validation has not been performed upstream in + // the mutation planning code. 
+ return r, errors.AssertionFailedf( + "locale mismatch %q vs %q", + v.Locale, colType.Locale(), + ) + } + case types.OidFamily: + if v, ok := val.(*tree.DOid); ok { + r.SetInt(int64(v.DInt)) + return r, nil + } + case types.EnumFamily: + if v, ok := val.(*tree.DEnum); ok { + r.SetBytes(v.PhysicalRep) + return r, nil + } + default: + return r, errors.AssertionFailedf("unsupported column type: %s", colType.Family()) + } + return r, errors.AssertionFailedf("mismatched type %q vs %q", val.ResolvedType(), colType.Family()) +} + +// UnmarshalLegacy is the counterpart to MarshalLegacy. +// +// It decodes the value from a roachpb.Value using the type expected +// by the column. An error is returned if the value's type does not +// match the column's type. +func UnmarshalLegacy(a *tree.DatumAlloc, typ *types.T, value roachpb.Value) (tree.Datum, error) { + if value.RawBytes == nil { + return tree.DNull, nil + } + + switch typ.Family() { + case types.BitFamily: + d, err := value.GetBitArray() + if err != nil { + return nil, err + } + return a.NewDBitArray(tree.DBitArray{BitArray: d}), nil + case types.BoolFamily: + v, err := value.GetBool() + if err != nil { + return nil, err + } + return tree.MakeDBool(tree.DBool(v)), nil + case types.IntFamily: + v, err := value.GetInt() + if err != nil { + return nil, err + } + return a.NewDInt(tree.DInt(v)), nil + case types.FloatFamily: + v, err := value.GetFloat() + if err != nil { + return nil, err + } + return a.NewDFloat(tree.DFloat(v)), nil + case types.DecimalFamily: + v, err := value.GetDecimal() + if err != nil { + return nil, err + } + dd := a.NewDDecimal(tree.DDecimal{Decimal: v}) + return dd, nil + case types.StringFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + if typ.Oid() == oid.T_name { + return a.NewDName(tree.DString(v)), nil + } + return a.NewDString(tree.DString(v)), nil + case types.BytesFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + return a.NewDBytes(tree.DBytes(v)), nil + case types.DateFamily: + v, err := value.GetInt() + if err != nil { + return nil, err + } + return a.NewDDate(tree.MakeDDate(pgdate.MakeCompatibleDateFromDisk(v))), nil + case types.Box2DFamily: + v, err := value.GetBox2D() + if err != nil { + return nil, err + } + return a.NewDBox2D(tree.DBox2D{ + CartesianBoundingBox: geo.CartesianBoundingBox{BoundingBox: v}, + }), nil + case types.GeographyFamily: + v, err := value.GetGeo() + if err != nil { + return nil, err + } + return a.NewDGeography(tree.DGeography{Geography: geo.MakeGeographyUnsafe(v)}), nil + case types.GeometryFamily: + v, err := value.GetGeo() + if err != nil { + return nil, err + } + return a.NewDGeometry(tree.DGeometry{Geometry: geo.MakeGeometryUnsafe(v)}), nil + case types.TimeFamily: + v, err := value.GetInt() + if err != nil { + return nil, err + } + return a.NewDTime(tree.DTime(v)), nil + case types.TimeTZFamily: + v, err := value.GetTimeTZ() + if err != nil { + return nil, err + } + return a.NewDTimeTZ(tree.DTimeTZ{TimeTZ: v}), nil + case types.TimestampFamily: + v, err := value.GetTime() + if err != nil { + return nil, err + } + return a.NewDTimestamp(tree.DTimestamp{Time: v}), nil + case types.TimestampTZFamily: + v, err := value.GetTime() + if err != nil { + return nil, err + } + return a.NewDTimestampTZ(tree.DTimestampTZ{Time: v}), nil + case types.IntervalFamily: + d, err := value.GetDuration() + if err != nil { + return nil, err + } + return a.NewDInterval(tree.DInterval{Duration: d}), nil + case types.CollatedStringFamily: + v, err 
:= value.GetBytes() + if err != nil { + return nil, err + } + return a.NewDCollatedString(string(v), typ.Locale()) + case types.UuidFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + u, err := uuid.FromBytes(v) + if err != nil { + return nil, err + } + return a.NewDUuid(tree.DUuid{UUID: u}), nil + case types.INetFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + var ipAddr ipaddr.IPAddr + _, err = ipAddr.FromBuffer(v) + if err != nil { + return nil, err + } + return a.NewDIPAddr(tree.DIPAddr{IPAddr: ipAddr}), nil + case types.OidFamily: + v, err := value.GetInt() + if err != nil { + return nil, err + } + return a.NewDOid(tree.MakeDOid(tree.DInt(v))), nil + case types.ArrayFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + datum, _, err := decodeArray(a, typ.ArrayContents(), v) + // TODO(yuzefovich): do we want to create a new object via tree.DatumAlloc? + return datum, err + case types.JsonFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + _, jsonDatum, err := json.DecodeJSON(v) + if err != nil { + return nil, err + } + return tree.NewDJSON(jsonDatum), nil + case types.EnumFamily: + v, err := value.GetBytes() + if err != nil { + return nil, err + } + phys, log, err := tree.GetEnumComponentsFromPhysicalRep(typ, v) + if err != nil { + return nil, err + } + return a.NewDEnum(tree.DEnum{EnumTyp: typ, PhysicalRep: phys, LogicalRep: log}), nil + default: + return nil, errors.Errorf("unsupported column type: %s", typ.Family()) + } +} diff --git a/pkg/sql/rowenc/valueside/tuple.go b/pkg/sql/rowenc/valueside/tuple.go new file mode 100644 index 000000000000..e3ec8489fbc2 --- /dev/null +++ b/pkg/sql/rowenc/valueside/tuple.go @@ -0,0 +1,60 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/encoding" +) + +// encodeTuple produces the value encoding for a tuple. +func encodeTuple(t *tree.DTuple, appendTo []byte, colID uint32, scratch []byte) ([]byte, error) { + appendTo = encoding.EncodeValueTag(appendTo, colID, encoding.Tuple) + return encodeUntaggedTuple(t, appendTo, colID, scratch) +} + +// encodeUntaggedTuple produces the value encoding for a tuple without a value tag. +func encodeUntaggedTuple( + t *tree.DTuple, appendTo []byte, colID uint32, scratch []byte, +) ([]byte, error) { + appendTo = encoding.EncodeNonsortingUvarint(appendTo, uint64(len(t.D))) + + var err error + for _, dd := range t.D { + appendTo, err = Encode(appendTo, NoColumnID, dd, scratch) + if err != nil { + return nil, err + } + } + return appendTo, nil +} + +// decodeTuple decodes a tuple from its value encoding. It is the +// counterpart of encodeTuple(). 
+func decodeTuple(a *tree.DatumAlloc, tupTyp *types.T, b []byte) (tree.Datum, []byte, error) { + b, _, _, err := encoding.DecodeNonsortingUvarint(b) + if err != nil { + return nil, nil, err + } + + result := *(tree.NewDTuple(tupTyp)) + result.D = a.NewDatums(len(tupTyp.TupleContents())) + var datum tree.Datum + for i := range tupTyp.TupleContents() { + datum, b, err = Decode(a, tupTyp.TupleContents()[i], b) + if err != nil { + return nil, b, err + } + result.D[i] = datum + } + return a.NewDTuple(result), b, nil +} diff --git a/pkg/sql/rowenc/valueside/valueside_test.go b/pkg/sql/rowenc/valueside/valueside_test.go new file mode 100644 index 000000000000..a1f7ae3afe9b --- /dev/null +++ b/pkg/sql/rowenc/valueside/valueside_test.go @@ -0,0 +1,332 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package valueside_test + +import ( + "fmt" + "math" + "reflect" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/randgen" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/util/timeofday" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/timeutil/pgdate" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" + "github.com/stretchr/testify/require" +) + +func TestEncodeDecode(t *testing.T) { + a := &tree.DatumAlloc{} + ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 10000 + properties := gopter.NewProperties(parameters) + var scratch []byte + properties.Property("roundtrip", prop.ForAll( + func(d tree.Datum) string { + b, err := valueside.Encode(nil, 0, d, scratch) + if err != nil { + return "error: " + err.Error() + } + newD, leftoverBytes, err := valueside.Decode(a, d.ResolvedType(), b) + if len(leftoverBytes) > 0 { + return "Leftover bytes" + } + if err != nil { + return "error: " + err.Error() + } + if newD.Compare(ctx, d) != 0 { + return "unequal" + } + return "" + }, + genDatum(), + )) + properties.TestingRun(t) +} + +func TestDecode(t *testing.T) { + a := &tree.DatumAlloc{} + for _, tc := range []struct { + in tree.Datum + typ *types.T + err string + }{ + // These test cases are not intended to be exhaustive, but rather exercise + // the special casing and error handling of Decode. 
+ {tree.DNull, types.Bool, ""}, + {tree.DBoolTrue, types.Bool, ""}, + {tree.NewDInt(tree.DInt(4)), types.Bool, "value type is not True or False: Int"}, + {tree.DNull, types.Int, ""}, + {tree.NewDInt(tree.DInt(4)), types.Int, ""}, + {tree.DBoolTrue, types.Int, "decoding failed"}, + } { + t.Run("", func(t *testing.T) { + var prefix, scratch []byte + buf, err := valueside.Encode(prefix, 0 /* colID */, tc.in, scratch) + if err != nil { + t.Fatal(err) + } + d, _, err := valueside.Decode(a, tc.typ, buf) + if !testutils.IsError(err, tc.err) { + t.Fatalf("expected error %q, but got %v", tc.err, err) + } else if err != nil { + return + } + if tc.in.Compare(tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()), d) != 0 { + t.Fatalf("decoded datum %[1]v (%[1]T) does not match encoded datum %[2]v (%[2]T)", d, tc.in) + } + }) + } +} + +// TestDecodeTableValueOutOfRangeTimestamp deliberately tests out of range timestamps +// can still be decoded from disk. See #46973. +func TestDecodeTableValueOutOfRangeTimestamp(t *testing.T) { + for _, d := range []tree.Datum{ + &tree.DTimestamp{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, + &tree.DTimestampTZ{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)}, + } { + t.Run(d.String(), func(t *testing.T) { + var b []byte + encoded, err := valueside.Encode(b, 1 /* colID */, d, []byte{}) + require.NoError(t, err) + a := &tree.DatumAlloc{} + decoded, _, err := valueside.Decode(a, d.ResolvedType(), encoded) + require.NoError(t, err) + require.Equal(t, d, decoded) + }) + } +} + +// This test ensures that decoding a tuple value with a specific, labeled tuple +// type preserves the labels. +func TestDecodeTupleValueWithType(t *testing.T) { + tupleType := types.MakeLabeledTuple([]*types.T{types.Int, types.String}, []string{"a", "b"}) + datum := tree.NewDTuple(tupleType, tree.NewDInt(tree.DInt(1)), tree.NewDString("foo")) + buf, err := valueside.Encode(nil, valueside.NoColumnID, datum, nil) + if err != nil { + t.Fatal(err) + } + da := tree.DatumAlloc{} + var decoded tree.Datum + decoded, _, err = valueside.Decode(&da, tupleType, buf) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, decoded, datum) +} + +func TestLegacy(t *testing.T) { + tests := []struct { + typ *types.T + datum tree.Datum + exp roachpb.Value + }{ + { + typ: types.Bool, + datum: tree.MakeDBool(true), + exp: func() (v roachpb.Value) { v.SetBool(true); return }(), + }, + { + typ: types.Bool, + datum: tree.MakeDBool(false), + exp: func() (v roachpb.Value) { v.SetBool(false); return }(), + }, + { + typ: types.Int, + datum: tree.NewDInt(314159), + exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), + }, + { + typ: types.Float, + datum: tree.NewDFloat(3.14159), + exp: func() (v roachpb.Value) { v.SetFloat(3.14159); return }(), + }, + { + typ: types.Decimal, + datum: func() (v tree.Datum) { + v, err := tree.ParseDDecimal("1234567890.123456890") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + return + }(), + exp: func() (v roachpb.Value) { + dDecimal, err := tree.ParseDDecimal("1234567890.123456890") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + err = v.SetDecimal(&dDecimal.Decimal) + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + return + }(), + }, + { + typ: types.Date, + datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(314159)), + exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), + }, + { + typ: types.Date, 
+ datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MinInt64)), + exp: func() (v roachpb.Value) { v.SetInt(math.MinInt64); return }(), + }, + { + typ: types.Date, + datum: tree.NewDDate(pgdate.MakeCompatibleDateFromDisk(math.MaxInt64)), + exp: func() (v roachpb.Value) { v.SetInt(math.MaxInt64); return }(), + }, + { + typ: types.Time, + datum: tree.MakeDTime(timeofday.FromInt(314159)), + exp: func() (v roachpb.Value) { v.SetInt(314159); return }(), + }, + { + typ: types.Timestamp, + datum: tree.MustMakeDTimestamp(timeutil.Unix(314159, 1000), time.Microsecond), + exp: func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(), + }, + { + typ: types.TimestampTZ, + datum: tree.MustMakeDTimestampTZ(timeutil.Unix(314159, 1000), time.Microsecond), + exp: func() (v roachpb.Value) { v.SetTime(timeutil.Unix(314159, 1000)); return }(), + }, + { + typ: types.String, + datum: tree.NewDString("testing123"), + exp: func() (v roachpb.Value) { v.SetString("testing123"); return }(), + }, + { + typ: types.Name, + datum: tree.NewDName("testingname123"), + exp: func() (v roachpb.Value) { v.SetString("testingname123"); return }(), + }, + { + typ: types.Bytes, + datum: tree.NewDBytes(tree.DBytes([]byte{0x31, 0x41, 0x59})), + exp: func() (v roachpb.Value) { v.SetBytes([]byte{0x31, 0x41, 0x59}); return }(), + }, + { + typ: types.Uuid, + datum: func() (v tree.Datum) { + v, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + return + }(), + exp: func() (v roachpb.Value) { + dUUID, err := tree.ParseDUuidFromString("63616665-6630-3064-6465-616462656562") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + v.SetBytes(dUUID.GetBytes()) + return + }(), + }, + { + typ: types.INet, + datum: func() (v tree.Datum) { + v, err := tree.ParseDIPAddrFromINetString("192.168.0.1") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + return + }(), + exp: func() (v roachpb.Value) { + ipAddr, err := tree.ParseDIPAddrFromINetString("192.168.0.1") + if err != nil { + t.Fatalf("Unexpected error while creating expected value: %s", err) + } + data := ipAddr.ToBuffer(nil) + v.SetBytes(data) + return + }(), + }, + } + + for i, testCase := range tests { + typ := testCase.typ + if actual, err := valueside.MarshalLegacy(typ, testCase.datum); err != nil { + t.Errorf("%d: unexpected error with column type %v: %v", i, typ, err) + } else if !reflect.DeepEqual(actual, testCase.exp) { + t.Errorf("%d: MarshalColumnValue() got %v, expected %v", i, actual, testCase.exp) + } + } +} + +func TestLegacyRoundtrip(t *testing.T) { + a := &tree.DatumAlloc{} + ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings()) + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 10000 + properties := gopter.NewProperties(parameters) + + properties.Property("roundtrip", + prop.ForAll( + func(typ *types.T) string { + d, ok := genDatumWithType(typ).Sample() + if !ok { + return "error generating datum" + } + datum := d.(tree.Datum) + value, err := valueside.MarshalLegacy(typ, datum) + if err != nil { + return "error marshaling: " + err.Error() + } + outDatum, err := valueside.UnmarshalLegacy(a, typ, value) + if err != nil { + return "error unmarshaling: " + err.Error() + } + if datum.Compare(ctx, outDatum) != 0 { + return fmt.Sprintf("datum didn't roundtrip.\ninput: %v\noutput: %v", datum, outDatum) + } + return 
"" + }, + genColumnType(), + ), + ) + properties.TestingRun(t) +} + +func genColumnType() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + columnType := randgen.RandColumnType(genParams.Rng) + return gopter.NewGenResult(columnType, gopter.NoShrinker) + } +} + +func genDatum() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + return gopter.NewGenResult(randgen.RandDatum(genParams.Rng, randgen.RandColumnType(genParams.Rng), + false), gopter.NoShrinker) + } +} + +func genDatumWithType(columnType interface{}) gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + datum := randgen.RandDatum(genParams.Rng, columnType.(*types.T), false) + return gopter.NewGenResult(datum, gopter.NoShrinker) + } +} diff --git a/pkg/sql/rowexec/BUILD.bazel b/pkg/sql/rowexec/BUILD.bazel index 86abfd9bbf31..53d149731093 100644 --- a/pkg/sql/rowexec/BUILD.bazel +++ b/pkg/sql/rowexec/BUILD.bazel @@ -27,7 +27,6 @@ go_library( "rowfetcher.go", "sample_aggregator.go", "sampler.go", - "scrub_tablereader.go", "sorter.go", "stats.go", "stream_group_accumulator.go", @@ -45,6 +44,8 @@ go_library( "//pkg/jobs/jobspb", "//pkg/keys", "//pkg/kv", + "//pkg/kv/kvclient/kvstreamer", + "//pkg/kv/kvserver/concurrency/lock", "//pkg/kv/kvserver/kvserverbase", "//pkg/roachpb:with-mocks", "//pkg/server/telemetry", @@ -131,6 +132,7 @@ go_test( "zigzagjoiner_test.go", ], embed = [":rowexec"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/gossip", @@ -161,6 +163,7 @@ go_test( "//pkg/sql/randgen", "//pkg/sql/rowcontainer", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/sqlutil", "//pkg/sql/stats", @@ -183,6 +186,7 @@ go_test( "//pkg/util/mon", "//pkg/util/protoutil", "//pkg/util/randutil", + "//pkg/util/stop", "//pkg/util/syncutil", "//pkg/util/timeutil", "//pkg/util/tracing", diff --git a/pkg/sql/rowexec/aggregator.go b/pkg/sql/rowexec/aggregator.go index e1b8acd98531..3531e05b1abe 100644 --- a/pkg/sql/rowexec/aggregator.go +++ b/pkg/sql/rowexec/aggregator.go @@ -56,7 +56,7 @@ type aggregatorBase struct { inputTypes []*types.T funcs []*aggregateFuncHolder outputTypes []*types.T - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc bucketsAcc mon.BoundAccount @@ -116,7 +116,7 @@ func (ag *aggregatorBase) init( // grouped-by values for each bucket. ag.funcs is updated to contain all // the functions which need to be fed values. ag.inputTypes = input.OutputTypes() - semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + semaCtx := flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) for i, aggInfo := range spec.Aggregations { if aggInfo.FilterColIdx != nil { col := *aggInfo.FilterColIdx @@ -905,7 +905,7 @@ func (ag *aggregatorBase) newAggregateFuncHolder( // row in the group. 
func (a *aggregateFuncHolder) isDistinct( ctx context.Context, - alloc *rowenc.DatumAlloc, + alloc *tree.DatumAlloc, prefix []byte, firstArg tree.Datum, otherArgs tree.Datums, diff --git a/pkg/sql/rowexec/bulk_row_writer.go b/pkg/sql/rowexec/bulk_row_writer.go index 18aeeeab44e9..ec31d2e42142 100644 --- a/pkg/sql/rowexec/bulk_row_writer.go +++ b/pkg/sql/rowexec/bulk_row_writer.go @@ -59,7 +59,7 @@ func newBulkRowWriterProcessor( flowCtx: flowCtx, processorID: processorID, batchIdxAtomic: 0, - tableDesc: spec.BuildTableDescriptor(), + tableDesc: flowCtx.TableDescriptor(&spec.Table), spec: spec, input: input, output: output, @@ -184,7 +184,7 @@ func (sp *bulkRowWriter) convertLoop( defer close(kvCh) done := false - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} typs := sp.input.OutputTypes() for { diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index 659cc364cf17..cce2d581b336 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -65,7 +65,7 @@ func newColumnBackfiller( columnBackfillerMon := execinfra.NewMonitor(ctx, flowCtx.Cfg.BackfillerMonitor, "column-backfill-mon") cb := &columnBackfiller{ - desc: spec.BuildTableDescriptor(), + desc: flowCtx.TableDescriptor(&spec.Table), backfiller: backfiller{ name: "Column", filter: backfill.ColumnMutationFilter, diff --git a/pkg/sql/rowexec/distinct.go b/pkg/sql/rowexec/distinct.go index cc03bfe01f87..afe62519d6cf 100644 --- a/pkg/sql/rowexec/distinct.go +++ b/pkg/sql/rowexec/distinct.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -43,7 +44,7 @@ type distinct struct { nonOrdered []uint32 } memAcc mon.BoundAccount - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc scratch []byte nullsAreDistinct bool nullCount uint32 diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index 9d3619ea586e..9104f2ad06b5 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -88,7 +88,7 @@ func newIndexBackfiller( indexBackfillerMon := execinfra.NewMonitor(ctx, flowCtx.Cfg.BackfillerMonitor, "index-backfill-mon") ib := &indexBackfiller{ - desc: spec.BuildTableDescriptor(), + desc: flowCtx.TableDescriptor(&spec.Table), spec: spec, flowCtx: flowCtx, output: output, diff --git a/pkg/sql/rowexec/inverted_filterer.go b/pkg/sql/rowexec/inverted_filterer.go index df7801f379ee..e4907520e424 100644 --- a/pkg/sql/rowexec/inverted_filterer.go +++ b/pkg/sql/rowexec/inverted_filterer.go @@ -125,7 +125,7 @@ func newInvertedFilterer( } if spec.PreFiltererSpec != nil { - semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + semaCtx := flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) var exprHelper execinfrapb.ExprHelper colTypes := []*types.T{spec.PreFiltererSpec.Type} if err := exprHelper.Init(spec.PreFiltererSpec.Expression, colTypes, semaCtx, ifr.EvalCtx); err != nil { diff --git a/pkg/sql/rowexec/inverted_joiner.go b/pkg/sql/rowexec/inverted_joiner.go index c6ec6cb14a16..5a680846ff1b 100644 --- a/pkg/sql/rowexec/inverted_joiner.go +++ b/pkg/sql/rowexec/inverted_joiner.go @@ -87,7 +87,7 @@ type invertedJoiner struct { fetcher rowFetcher // rowsRead 
is the total number of rows that the fetcher read from disk. rowsRead int64 - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc // tableRow represents a row with all the columns of the table, where only @@ -195,7 +195,7 @@ func newInvertedJoiner( return nil, errors.AssertionFailedf("unexpected inverted join type %s", spec.Type) } ij := &invertedJoiner{ - desc: spec.BuildTableDescriptor(), + desc: flowCtx.TableDescriptor(&spec.Table), input: input, inputTypes: input.OutputTypes(), prefixEqualityCols: spec.PrefixEqualityColumns, @@ -215,14 +215,18 @@ func newInvertedJoiner( // Initialize tableRow, indexRow, indexRowTypes, and indexRowToTableRowMap, // a mapping from indexRow column ordinal to tableRow column ordinals. - indexColumnIDs, _ := catalog.FullIndexColumnIDs(ij.index) + indexColumns := ij.desc.IndexFullColumns(ij.index) // Inverted joins are not used for mutations. ij.tableRow = make(rowenc.EncDatumRow, len(ij.desc.PublicColumns())) - ij.indexRow = make(rowenc.EncDatumRow, len(indexColumnIDs)-1) + ij.indexRow = make(rowenc.EncDatumRow, len(indexColumns)-1) ij.indexRowTypes = make([]*types.T, len(ij.indexRow)) ij.indexRowToTableRowMap = make([]int, len(ij.indexRow)) indexRowIdx := 0 - for _, colID := range indexColumnIDs { + for _, col := range indexColumns { + if col == nil { + continue + } + colID := col.GetID() // Do not include the inverted column in the map. if colID == ij.invertedColID { continue @@ -269,7 +273,7 @@ func newInvertedJoiner( return nil, err } - semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + semaCtx := flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) onExprColTypes := make([]*types.T, 0, len(ij.inputTypes)+len(rightColTypes)) onExprColTypes = append(onExprColTypes, ij.inputTypes...) onExprColTypes = append(onExprColTypes, rightColTypes...) @@ -308,14 +312,17 @@ func newInvertedJoiner( // here. For now, we do the simple thing, since we have no idea whether // such workloads actually occur in practice. allIndexCols := util.MakeFastIntSet() - for _, colID := range indexColumnIDs { - allIndexCols.Add(ij.colIdxMap.GetDefault(colID)) + for _, col := range indexColumns { + if col == nil { + continue + } + allIndexCols.Add(ij.colIdxMap.GetDefault(col.GetID())) } // We use ScanVisibilityPublic since inverted joins are not used for mutations, // and so do not need to see in-progress schema changes. 
_, _, err = initRowFetcher( flowCtx, &fetcher, ij.desc, int(spec.IndexIdx), ij.colIdxMap, false, /* reverse */ - allIndexCols, false /* isCheck */, flowCtx.EvalCtx.Mon, &ij.alloc, execinfra.ScanVisibilityPublic, + allIndexCols, flowCtx.EvalCtx.Mon, &ij.alloc, descpb.ScanLockingStrength_FOR_NONE, descpb.ScanLockingWaitPolicy_BLOCK, false /* withSystemColumns */, nil, /* virtualColumn */ ) diff --git a/pkg/sql/rowexec/joinerbase.go b/pkg/sql/rowexec/joinerbase.go index fdebb09d33d4..7c83e25316b1 100644 --- a/pkg/sql/rowexec/joinerbase.go +++ b/pkg/sql/rowexec/joinerbase.go @@ -97,7 +97,7 @@ func (jb *joinerBase) init( ); err != nil { return err } - semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + semaCtx := flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) return jb.onCond.Init(onExpr, onCondTypes, semaCtx, jb.EvalCtx) } diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index 2a37721d24a3..5bcd42bcf33b 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -12,9 +12,12 @@ package rowexec import ( "context" + "math" "sort" "unsafe" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvstreamer" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -110,11 +113,24 @@ type joinReader struct { // fetcher wraps the row.Fetcher used to perform lookups. This enables the // joinReader to wrap the fetcher with a stat collector when necessary. fetcher rowFetcher - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc rowAlloc rowenc.EncDatumRowAlloc shouldLimitBatches bool readerType joinReaderType + keyLocking descpb.ScanLockingStrength + lockWaitPolicy lock.WaitPolicy + + // usesStreamer indicates whether the joinReader performs the lookups using + // the kvcoord.Streamer API. + usesStreamer bool + streamerInfo struct { + *kvstreamer.Streamer + unlimitedMemMonitor *mon.BytesMonitor + budgetAcc mon.BoundAccount + budgetLimit int64 + } + input execinfra.RowSource // lookupCols and lookupExpr (and optionally remoteLookupExpr) represent the @@ -145,6 +161,11 @@ type joinReader struct { batchSizeBytes int64 curBatchSizeBytes int64 + // pendingRow tracks the row that has already been read from the input but + // was not included into the lookup batch because it would make the batch + // exceed batchSizeBytes. + pendingRow rowenc.EncDatumRow + // rowsRead is the total number of rows that this fetcher read from // disk. 
rowsRead int64 @@ -256,7 +277,7 @@ func newJoinReader( } var lookupCols []uint32 - tableDesc := spec.BuildTableDescriptor() + tableDesc := flowCtx.TableDescriptor(&spec.Table) switch readerType { case indexJoinReaderType: lookupCols = make([]uint32, tableDesc.GetPrimaryIndex().NumKeyColumns()) @@ -280,6 +301,8 @@ func newJoinReader( if flowCtx.EvalCtx.SessionData().ParallelizeMultiKeyLookupJoinsEnabled { shouldLimitBatches = false } + tryStreamer := row.CanUseStreamer(flowCtx.EvalCtx.Ctx(), flowCtx.EvalCtx.Settings) && !spec.MaintainOrdering + jr := &joinReader{ desc: tableDesc, maintainOrdering: spec.MaintainOrdering, @@ -288,6 +311,9 @@ func newJoinReader( outputGroupContinuationForLeftRow: spec.OutputGroupContinuationForLeftRow, shouldLimitBatches: shouldLimitBatches, readerType: readerType, + keyLocking: spec.LockingStrength, + lockWaitPolicy: row.GetWaitPolicy(spec.LockingWaitPolicy), + usesStreamer: (readerType == indexJoinReaderType) && tryStreamer, lookupBatchBytesLimit: rowinfra.BytesLimit(spec.LookupBatchBytesLimit), } if readerType != indexJoinReaderType { @@ -301,17 +327,17 @@ func newJoinReader( } jr.index = jr.desc.ActiveIndexes()[indexIdx] isSecondary = !jr.index.Primary() - cols := jr.desc.PublicColumns() - if spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic { - cols = jr.desc.DeletableColumns() - } + cols := jr.desc.DeletableColumns() jr.colIdxMap = catalog.ColumnIDToOrdinalMap(cols) columnTypes := catalog.ColumnTypes(cols) - columnIDs, _ := catalog.FullIndexColumnIDs(jr.index) - indexCols := make([]uint32, len(columnIDs)) - for i, columnID := range columnIDs { - indexCols[i] = uint32(columnID) + columns := jr.desc.IndexFullColumns(jr.index) + indexCols := make([]uint32, len(columns)) + for i, col := range columns { + if col == nil { + continue + } + indexCols[i] = uint32(col.GetID()) } // Add all requested system columns to the output. @@ -380,7 +406,7 @@ func newJoinReader( var fetcher row.Fetcher _, _, err = initRowFetcher( flowCtx, &fetcher, jr.desc, int(spec.IndexIdx), jr.colIdxMap, false, /* reverse */ - rightCols, false /* isCheck */, jr.EvalCtx.Mon, &jr.alloc, spec.Visibility, spec.LockingStrength, + rightCols, jr.EvalCtx.Mon, &jr.alloc, spec.LockingStrength, spec.LockingWaitPolicy, spec.HasSystemColumns, nil, /* virtualColumn */ ) @@ -400,7 +426,7 @@ func newJoinReader( lookupExprTypes = append(lookupExprTypes, leftTypes...) lookupExprTypes = append(lookupExprTypes, columnTypes...) - semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn) + semaCtx := flowCtx.NewSemaContext(flowCtx.EvalCtx.Txn) if err := jr.lookupExpr.Init(spec.LookupExpr, lookupExprTypes, semaCtx, jr.EvalCtx); err != nil { return nil, err } @@ -430,11 +456,37 @@ func newJoinReader( jr.MemMonitor.Start(flowCtx.EvalCtx.Ctx(), flowCtx.EvalCtx.Mon, mon.BoundAccount{}) jr.memAcc = jr.MemMonitor.MakeBoundAccount() - if err := jr.initJoinReaderStrategy(flowCtx, columnTypes, len(columnIDs), rightCols, readerType); err != nil { + if err := jr.initJoinReaderStrategy(flowCtx, columnTypes, len(columns), rightCols, readerType); err != nil { return nil, err } jr.batchSizeBytes = jr.strategy.getLookupRowsBatchSizeHint(flowCtx.EvalCtx.SessionData()) + if jr.usesStreamer { + maxKeysPerRow, err := jr.desc.KeysPerRow(jr.index.GetID()) + if err != nil { + return nil, err + } + if maxKeysPerRow > 1 { + // Currently, the streamer only supports cases with a single column + // family. 
+ jr.usesStreamer = false + } else { + // jr.batchSizeBytes will be used up by the input batch, and we'll + // give everything else to the streamer budget. Note that + // budgetLimit will always be positive given that memoryLimit is at + // least 8MiB and batchSizeBytes is at most 4MiB. + jr.streamerInfo.budgetLimit = memoryLimit - jr.batchSizeBytes + // We need to use an unlimited monitor for the streamer's budget + // since the streamer itself is responsible for staying under the + // limit. + jr.streamerInfo.unlimitedMemMonitor = mon.NewMonitorInheritWithLimit( + "joinreader-streamer-unlimited" /* name */, math.MaxInt64, flowCtx.EvalCtx.Mon, + ) + jr.streamerInfo.unlimitedMemMonitor.Start(flowCtx.EvalCtx.Ctx(), flowCtx.EvalCtx.Mon, mon.BoundAccount{}) + jr.streamerInfo.budgetAcc = jr.streamerInfo.unlimitedMemMonitor.MakeBoundAccount() + } + } + // TODO(radu): verify the input types match the index key types return jr, nil } @@ -471,9 +523,12 @@ func (jr *joinReader) initJoinReaderStrategy( // localityOptimizedSpanGenerator, which support looking up multiple spans // per input row. tableOrdToIndexOrd := util.FastIntMap{} - columnIDs, _ := catalog.FullIndexColumnIDs(jr.index) - for i, colID := range columnIDs { - tabOrd := jr.colIdxMap.GetDefault(colID) + columns := jr.desc.IndexFullColumns(jr.index) + for i, col := range columns { + if col == nil { + continue + } + tabOrd := jr.colIdxMap.GetDefault(col.GetID()) tableOrdToIndexOrd.Set(tabOrd, i) } @@ -698,28 +753,53 @@ func (jr *joinReader) readInput() ( } // Read the next batch of input rows. - for jr.curBatchSizeBytes < jr.batchSizeBytes { - row, meta := jr.input.Next() - if meta != nil { - if meta.Err != nil { - jr.MoveToDraining(nil /* err */) - return jrStateUnknown, nil, meta + for { + var encDatumRow rowenc.EncDatumRow + var rowSize int64 + if jr.pendingRow == nil { + // There is no pending row, so we have to get the next one from the + // input. + var meta *execinfrapb.ProducerMetadata + encDatumRow, meta = jr.input.Next() + if meta != nil { + if meta.Err != nil { + jr.MoveToDraining(nil /* err */) + return jrStateUnknown, nil, meta + } + + if err := jr.performMemoryAccounting(); err != nil { + jr.MoveToDraining(err) + return jrStateUnknown, nil, meta + } + + return jrReadingInput, nil, meta } - - if err := jr.performMemoryAccounting(); err != nil { - jr.MoveToDraining(err) - return jrStateUnknown, nil, meta + if encDatumRow == nil { + break } - - return jrReadingInput, nil, meta - } - if row == nil { - break + rowSize = int64(encDatumRow.Size()) + if jr.curBatchSizeBytes > 0 && jr.curBatchSizeBytes+rowSize > jr.batchSizeBytes { + // Adding this row to the current batch will make the batch + // exceed jr.batchSizeBytes. Additionally, the batch is not + // empty, so we'll store this row as "pending" and will include + // it into the next batch. + // + // The batch being non-empty is important because in case it was + // empty and we decided to not include this (first) row into it, + // then we'd be stalled - we'd generate no spans, so we'd not + // perform the lookup of anything. + jr.pendingRow = encDatumRow + break + } + } else { + encDatumRow = jr.pendingRow + jr.pendingRow = nil + rowSize = int64(encDatumRow.Size()) } - jr.curBatchSizeBytes += int64(row.Size()) + jr.curBatchSizeBytes += rowSize if jr.groupingState != nil { // Lookup Join. 
- if err := jr.processContinuationValForRow(row); err != nil { + if err := jr.processContinuationValForRow(encDatumRow); err != nil { jr.MoveToDraining(err) return jrStateUnknown, nil, jr.DrainHelper() } @@ -728,12 +808,11 @@ func (jr *joinReader) readInput() ( // // We need to subtract the EncDatumRowOverhead because that is already // tracked in jr.accountedFor.scratchInputRows. - rowSize := int64(row.Size() - rowenc.EncDatumRowOverhead) - if err := jr.memAcc.Grow(jr.Ctx, rowSize); err != nil { + if err := jr.memAcc.Grow(jr.Ctx, rowSize-int64(rowenc.EncDatumRowOverhead)); err != nil { jr.MoveToDraining(err) return jrStateUnknown, nil, jr.DrainHelper() } - jr.scratchInputRows = append(jr.scratchInputRows, jr.rowAlloc.CopyRow(row)) + jr.scratchInputRows = append(jr.scratchInputRows, jr.rowAlloc.CopyRow(encDatumRow)) } if err := jr.performMemoryAccounting(); err != nil { @@ -804,24 +883,35 @@ func (jr *joinReader) readInput() ( } log.VEventf(jr.Ctx, 1, "scanning %d spans", len(spans)) - var bytesLimit rowinfra.BytesLimit - if !jr.shouldLimitBatches { - bytesLimit = rowinfra.NoBytesLimit - } else { - bytesLimit = jr.lookupBatchBytesLimit - if jr.lookupBatchBytesLimit == 0 { - bytesLimit = rowinfra.DefaultBatchBytesLimit - } - } // Note that the fetcher takes ownership of the spans slice - it will modify // it and perform the memory accounting. We don't care about the // modification here, but we want to be conscious about the memory // accounting - we don't double count for any memory of spans because the // joinReaderStrategy doesn't account for any memory used by the spans. - if err := jr.fetcher.StartScan( - jr.Ctx, jr.FlowCtx.Txn, spans, bytesLimit, rowinfra.NoRowLimit, - jr.FlowCtx.TraceKV, jr.EvalCtx.TestingKnobs.ForceProductionBatchSizes, - ); err != nil { + if jr.usesStreamer { + var kvBatchFetcher *row.TxnKVStreamer + kvBatchFetcher, err = row.NewTxnKVStreamer(jr.Ctx, jr.streamerInfo.Streamer, spans, jr.keyLocking) + if err != nil { + jr.MoveToDraining(err) + return jrStateUnknown, nil, jr.DrainHelper() + } + err = jr.fetcher.StartScanFrom(jr.Ctx, kvBatchFetcher, jr.FlowCtx.TraceKV) + } else { + var bytesLimit rowinfra.BytesLimit + if !jr.shouldLimitBatches { + bytesLimit = rowinfra.NoBytesLimit + } else { + bytesLimit = jr.lookupBatchBytesLimit + if jr.lookupBatchBytesLimit == 0 { + bytesLimit = rowinfra.DefaultBatchBytesLimit + } + } + err = jr.fetcher.StartScan( + jr.Ctx, jr.FlowCtx.Txn, spans, bytesLimit, rowinfra.NoRowLimit, + jr.FlowCtx.TraceKV, jr.EvalCtx.TestingKnobs.ForceProductionBatchSizes, + ) + } + if err != nil { jr.MoveToDraining(err) return jrStateUnknown, nil, jr.DrainHelper() } @@ -936,6 +1026,21 @@ func (jr *joinReader) performMemoryAccounting() error { func (jr *joinReader) Start(ctx context.Context) { ctx = jr.StartInternal(ctx, joinReaderProcName) jr.input.Start(ctx) + if jr.usesStreamer { + jr.streamerInfo.Streamer = kvstreamer.NewStreamer( + jr.FlowCtx.Cfg.DistSender, + jr.FlowCtx.Stopper(), + jr.FlowCtx.Txn, + jr.FlowCtx.EvalCtx.Settings, + jr.lockWaitPolicy, + jr.streamerInfo.budgetLimit, + &jr.streamerInfo.budgetAcc, + ) + jr.streamerInfo.Streamer.Init( + kvstreamer.OutOfOrder, + kvstreamer.Hints{UniqueRequests: true}, + ) + } jr.runningState = jrReadingInput } @@ -950,6 +1055,16 @@ func (jr *joinReader) close() { if jr.fetcher != nil { jr.fetcher.Close(jr.Ctx) } + if jr.usesStreamer { + // We have to cleanup the streamer after closing the fetcher because + // the latter might release some memory tracked by the budget of the + // streamer. 
+ if jr.streamerInfo.Streamer != nil { + jr.streamerInfo.Streamer.Close() + } + jr.streamerInfo.budgetAcc.Close(jr.Ctx) + jr.streamerInfo.unlimitedMemMonitor.Stop(jr.Ctx) + } jr.strategy.close(jr.Ctx) jr.memAcc.Close(jr.Ctx) if jr.limitedMemMonitor != nil { diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index fdafdb722511..60a386a3f762 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" @@ -1314,6 +1315,8 @@ func TestIndexJoiner(t *testing.T) { c.outputTypes, c.expected, txn, + s.Stopper(), + s.DistSenderI().(*kvcoord.DistSender), ) }) } diff --git a/pkg/sql/rowexec/processor_utils_test.go b/pkg/sql/rowexec/processor_utils_test.go index 836ba394a37d..f3445d0c5d89 100644 --- a/pkg/sql/rowexec/processor_utils_test.go +++ b/pkg/sql/rowexec/processor_utils_test.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" @@ -74,7 +75,7 @@ func toEncDatum(datumType *types.T, v interface{}) rowenc.EncDatum { } }() // Initialize both EncDatum.Datum, and EncDatum.encoded. - encoded, err := rowenc.EncodeTableKey(nil, d, encoding.Ascending) + encoded, err := keyside.Encode(nil, d, encoding.Ascending) if err != nil { panic(err) } diff --git a/pkg/sql/rowexec/processors.go b/pkg/sql/rowexec/processors.go index 0c045c6dbe14..b649964e85e8 100644 --- a/pkg/sql/rowexec/processors.go +++ b/pkg/sql/rowexec/processors.go @@ -133,8 +133,8 @@ func NewProcessor( if err := checkNumInOut(inputs, outputs, 0, 1); err != nil { return nil, err } - if core.TableReader.IsCheck { - return newScrubTableReader(flowCtx, processorID, core.TableReader, post, outputs[0]) + if core.TableReader.DeprecatedIsCheck { + return nil, errors.New("scrubbing TableReader no longer implemented") } return newTableReader(flowCtx, processorID, core.TableReader, post, outputs[0]) } @@ -148,7 +148,7 @@ func NewProcessor( if err := checkNumInOut(inputs, outputs, 1, 1); err != nil { return nil, err } - if len(core.JoinReader.LookupColumns) == 0 && core.JoinReader.LookupExpr.Empty() { + if core.JoinReader.IsIndexJoin() { return newJoinReader( flowCtx, processorID, core.JoinReader, inputs[0], post, outputs[0], indexJoinReaderType) } diff --git a/pkg/sql/rowexec/project_set.go b/pkg/sql/rowexec/project_set.go index fde924f2d301..403494b3afa7 100644 --- a/pkg/sql/rowexec/project_set.go +++ b/pkg/sql/rowexec/project_set.go @@ -104,7 +104,7 @@ func newProjectSetProcessor( } // Initialize exprHelpers. 
- semaCtx := ps.FlowCtx.TypeResolverFactory.NewSemaContext(ps.EvalCtx.Txn) + semaCtx := ps.FlowCtx.NewSemaContext(ps.EvalCtx.Txn) for i, expr := range ps.spec.Exprs { var helper execinfrapb.ExprHelper err := helper.Init(expr, ps.input.OutputTypes(), semaCtx, ps.EvalCtx) diff --git a/pkg/sql/rowexec/project_set_test.go b/pkg/sql/rowexec/project_set_test.go index ae695d067a11..bb02898a91f2 100644 --- a/pkg/sql/rowexec/project_set_test.go +++ b/pkg/sql/rowexec/project_set_test.go @@ -116,6 +116,8 @@ func TestProjectSet(t *testing.T) { append(c.inputTypes, c.spec.GeneratedColumns...), /* outputTypes */ c.expected, nil, + nil, + nil, ) }) } diff --git a/pkg/sql/rowexec/rowfetcher.go b/pkg/sql/rowexec/rowfetcher.go index 07175c4ef7f1..7b006ee57ef6 100644 --- a/pkg/sql/rowexec/rowfetcher.go +++ b/pkg/sql/rowexec/rowfetcher.go @@ -19,10 +19,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -56,7 +56,6 @@ type rowFetcher interface { PartialKey(int) (roachpb.Key, error) Reset() GetBytesRead() int64 - NextRowWithErrors(context.Context) (rowenc.EncDatumRow, error) // Close releases any resources held by this fetcher. Close(ctx context.Context) } @@ -70,10 +69,8 @@ func initRowFetcher( colIdxMap catalog.TableColMap, reverseScan bool, valNeededForCol util.FastIntSet, - isCheck bool, mon *mon.BytesMonitor, - alloc *rowenc.DatumAlloc, - scanVisibility execinfrapb.ScanVisibility, + alloc *tree.DatumAlloc, lockStrength descpb.ScanLockingStrength, lockWaitPolicy descpb.ScanLockingWaitPolicy, withSystemColumns bool, @@ -92,7 +89,7 @@ func initRowFetcher( IsSecondaryIndex: isSecondaryIndex, ValNeededForCol: valNeededForCol, } - tableArgs.InitCols(desc, scanVisibility, withSystemColumns, virtualColumn) + tableArgs.InitCols(desc, withSystemColumns, virtualColumn) if err := fetcher.Init( flowCtx.EvalCtx.Context, @@ -101,7 +98,6 @@ func initRowFetcher( lockStrength, lockWaitPolicy, flowCtx.EvalCtx.SessionData().LockTimeout, - isCheck, alloc, mon, tableArgs, diff --git a/pkg/sql/rowexec/sample_aggregator.go b/pkg/sql/rowexec/sample_aggregator.go index aa33ff64fca3..7ddf07e0871b 100644 --- a/pkg/sql/rowexec/sample_aggregator.go +++ b/pkg/sql/rowexec/sample_aggregator.go @@ -237,7 +237,7 @@ func (s *sampleAggregator) mainLoop(ctx context.Context) (earlyExit bool, err er var rowsProcessed uint64 progressUpdates := util.Every(SampleAggregatorProgressInterval) - var da rowenc.DatumAlloc + var da tree.DatumAlloc for { row, meta := s.input.Next() if meta != nil { @@ -339,7 +339,7 @@ func (s *sampleAggregator) mainLoop(ctx context.Context) (earlyExit bool, err er } func (s *sampleAggregator) processSketchRow( - sketch *sketchInfo, row rowenc.EncDatumRow, da *rowenc.DatumAlloc, + sketch *sketchInfo, row rowenc.EncDatumRow, da *tree.DatumAlloc, ) error { var tmpSketch hyperloglog.Sketch diff --git a/pkg/sql/rowexec/sample_aggregator_test.go b/pkg/sql/rowexec/sample_aggregator_test.go index e4c840dd5ed4..0f48dfd72d59 100644 --- a/pkg/sql/rowexec/sample_aggregator_test.go +++ 
b/pkg/sql/rowexec/sample_aggregator_test.go @@ -255,7 +255,7 @@ func runSampleAggregator( if err != nil { t.Fatal(err) } - var d rowenc.DatumAlloc + var d tree.DatumAlloc if err := ed.EnsureDecoded(histEncType, &d); err != nil { t.Fatal(err) } diff --git a/pkg/sql/rowexec/sampler.go b/pkg/sql/rowexec/sampler.go index 79e15201543d..365349fbeae9 100644 --- a/pkg/sql/rowexec/sampler.go +++ b/pkg/sql/rowexec/sampler.go @@ -239,7 +239,7 @@ func (s *samplerProcessor) Run(ctx context.Context) { func (s *samplerProcessor) mainLoop(ctx context.Context) (earlyExit bool, err error) { rng, _ := randutil.NewPseudoRand() - var da rowenc.DatumAlloc + var da tree.DatumAlloc var buf []byte rowCount := 0 lastWakeupTime := timeutil.Now() @@ -493,7 +493,7 @@ func (s *samplerProcessor) DoesNotUseTxn() bool { // addRow adds a row to the sketch and updates row counts. func (s *sketchInfo) addRow( - ctx context.Context, row rowenc.EncDatumRow, typs []*types.T, buf *[]byte, da *rowenc.DatumAlloc, + ctx context.Context, row rowenc.EncDatumRow, typs []*types.T, buf *[]byte, da *tree.DatumAlloc, ) error { var err error s.numRows++ diff --git a/pkg/sql/rowexec/scrub_tablereader.go b/pkg/sql/rowexec/scrub_tablereader.go deleted file mode 100644 index b76563d96766..000000000000 --- a/pkg/sql/rowexec/scrub_tablereader.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2018 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package rowexec - -import ( - "bytes" - "context" - - "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" - "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" - "github.com/cockroachdb/cockroach/pkg/sql/scrub" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" - "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/errors" -) - -// ScrubTypes is the schema for TableReaders that are doing a SCRUB -// check. This schema is what TableReader output streams are overrided -// to for check. The column types correspond to: -// - Error type. -// - Primary key as a string, if it was obtainable. -// - JSON of all decoded column values. -// -// TODO(joey): If we want a way find the key for the error, we will need -// additional data such as the key bytes and the table descriptor ID. -// Repair won't be possible without this. -var ScrubTypes = []*types.T{ - types.String, - types.String, - types.Jsonb, -} - -type scrubTableReader struct { - tableReader - tableDesc catalog.TableDescriptor - // fetcherResultToColIdx maps Fetcher results to the column index in - // the TableDescriptor. This is only initialized and used during scrub - // physical checks. - fetcherResultToColIdx []int - // indexIdx refers to the index being scanned. This is only used - // during scrub physical checks. 
- indexIdx int -} - -var _ execinfra.Processor = &scrubTableReader{} -var _ execinfra.RowSource = &scrubTableReader{} - -var scrubTableReaderProcName = "scrub" - -// newScrubTableReader creates a scrubTableReader. -func newScrubTableReader( - flowCtx *execinfra.FlowCtx, - processorID int32, - spec *execinfrapb.TableReaderSpec, - post *execinfrapb.PostProcessSpec, - output execinfra.RowReceiver, -) (*scrubTableReader, error) { - // NB: we hit this with a zero NodeID (but !ok) with multi-tenancy. - if nodeID, ok := flowCtx.NodeID.OptionalNodeID(); nodeID == 0 && ok { - return nil, errors.Errorf("attempting to create a tableReader with uninitialized NodeID") - } - tr := &scrubTableReader{ - indexIdx: int(spec.IndexIdx), - } - - tr.tableDesc = spec.BuildTableDescriptor() - tr.limitHint = rowinfra.RowLimit(execinfra.LimitHint(spec.LimitHint, post)) - - if err := tr.Init( - tr, - post, - ScrubTypes, - flowCtx, - processorID, - output, - nil, /* memMonitor */ - execinfra.ProcStateOpts{ - // We don't pass tr.input as an inputToDrain; tr.input is just an adapter - // on top of a Fetcher; draining doesn't apply to it. Moreover, Andrei - // doesn't trust that the adapter will do the right thing on a Next() call - // after it had previously returned an error. - InputsToDrain: nil, - TrailingMetaCallback: tr.generateTrailingMeta, - }, - ); err != nil { - return nil, err - } - - var neededColumns util.FastIntSet - // If we are doing a scrub physical check, NeededColumns needs to be - // changed to be all columns available in the index we are scanning. - // This is because the emitted schema is ScrubTypes so NeededColumns - // does not correctly represent the data being scanned. - if spec.IndexIdx == 0 { - neededColumns.AddRange(0, len(tr.tableDesc.PublicColumns())-1) - for i := range tr.tableDesc.PublicColumns() { - tr.fetcherResultToColIdx = append(tr.fetcherResultToColIdx, i) - } - } else { - colIdxMap := catalog.ColumnIDToOrdinalMap(tr.tableDesc.PublicColumns()) - idx := tr.tableDesc.PublicNonPrimaryIndexes()[spec.IndexIdx-1] - colIDs := idx.CollectKeyColumnIDs() - colIDs.UnionWith(idx.CollectSecondaryStoredColumnIDs()) - colIDs.UnionWith(idx.CollectKeySuffixColumnIDs()) - colIDs.ForEach(func(colID descpb.ColumnID) { - neededColumns.Add(colIdxMap.GetDefault(colID)) - }) - } - - var fetcher row.Fetcher - if _, _, err := initRowFetcher( - flowCtx, &fetcher, tr.tableDesc, int(spec.IndexIdx), catalog.ColumnIDToOrdinalMap(tr.tableDesc.PublicColumns()), - spec.Reverse, neededColumns, true /* isCheck */, flowCtx.EvalCtx.Mon, &tr.alloc, - execinfra.ScanVisibilityPublic, spec.LockingStrength, spec.LockingWaitPolicy, - false /* withSystemColumns */, nil, /* virtualColumn */ - ); err != nil { - return nil, err - } - tr.fetcher = &fetcher - - tr.Spans = spec.Spans - tr.MakeSpansCopy() - - return tr, nil -} - -// generateScrubErrorRow will create an EncDatumRow describing a -// physical check error encountered when scanning table data. The schema -// of the EncDatumRow is the ScrubTypes constant. -func (tr *scrubTableReader) generateScrubErrorRow( - row rowenc.EncDatumRow, scrubErr *scrub.Error, -) (rowenc.EncDatumRow, error) { - details := make(map[string]interface{}) - index := tr.tableDesc.ActiveIndexes()[tr.indexIdx] - // Collect all the row values into JSON - rowDetails := make(map[string]interface{}) - for i, colIdx := range tr.fetcherResultToColIdx { - col := tr.tableDesc.PublicColumns()[colIdx] - // TODO(joey): We should maybe try to get the underlying type. 
- rowDetails[col.GetName()] = row[i].String(col.GetType()) - } - details["row_data"] = rowDetails - details["index_name"] = index.GetName() - details["error_message"] = scrub.UnwrapScrubError(error(scrubErr)).Error() - - detailsJSON, err := tree.MakeDJSON(details) - if err != nil { - return nil, err - } - - primaryKeyValues := tr.prettyPrimaryKeyValues(row, tr.tableDesc.TableDesc()) - return rowenc.EncDatumRow{ - rowenc.DatumToEncDatum( - ScrubTypes[0], - tree.NewDString(scrubErr.Code), - ), - rowenc.DatumToEncDatum( - ScrubTypes[1], - tree.NewDString(primaryKeyValues), - ), - rowenc.DatumToEncDatum( - ScrubTypes[2], - detailsJSON, - ), - }, nil -} - -func (tr *scrubTableReader) prettyPrimaryKeyValues( - row rowenc.EncDatumRow, table *descpb.TableDescriptor, -) string { - var colIdxMap catalog.TableColMap - for i := range table.Columns { - id := table.Columns[i].ID - colIdxMap.Set(id, i) - } - var colIDToRowIdxMap catalog.TableColMap - for rowIdx, colIdx := range tr.fetcherResultToColIdx { - colIDToRowIdxMap.Set(tr.tableDesc.PublicColumns()[colIdx].GetID(), rowIdx) - } - var primaryKeyValues bytes.Buffer - primaryKeyValues.WriteByte('(') - for i, id := range table.PrimaryIndex.KeyColumnIDs { - if i > 0 { - primaryKeyValues.WriteByte(',') - } - primaryKeyValues.WriteString( - row[colIDToRowIdxMap.GetDefault(id)].String(table.Columns[colIdxMap.GetDefault(id)].Type)) - } - primaryKeyValues.WriteByte(')') - return primaryKeyValues.String() -} - -// Start is part of the RowSource interface. -func (tr *scrubTableReader) Start(ctx context.Context) { - if tr.FlowCtx.Txn == nil { - tr.MoveToDraining(errors.Errorf("scrubTableReader outside of txn")) - } - - ctx = tr.StartInternal(ctx, scrubTableReaderProcName) - - log.VEventf(ctx, 1, "starting") - - if err := tr.fetcher.StartScan( - ctx, tr.FlowCtx.Txn, tr.Spans, rowinfra.DefaultBatchBytesLimit, tr.limitHint, - tr.FlowCtx.TraceKV, tr.EvalCtx.TestingKnobs.ForceProductionBatchSizes, - ); err != nil { - tr.MoveToDraining(err) - } -} - -// Next is part of the RowSource interface. -func (tr *scrubTableReader) Next() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) { - for tr.State == execinfra.StateRunning { - var row rowenc.EncDatumRow - var err error - // If we are running a scrub physical check, we use a specialized - // procedure that runs additional checks while fetching the row - // data. - row, err = tr.fetcher.NextRowWithErrors(tr.Ctx) - // There are four cases that can happen after NextRowWithErrors: - // 1) We encounter a ScrubError. We do not propagate the error up, - // but instead generate and emit a row for the final results. - // 2) No errors were found. We simply continue scanning the data - // and discard the row values, as they are not needed for any - // results. - // 3) A non-scrub error was encountered. This was not considered a - // physical data error, and so we propagate this to the user - // immediately. - // 4) There was no error or row data. This signals that there is - // no more data to scan. - // - // NB: Cases 3 and 4 are handled further below, in the standard - // table scanning code path. 
- var v *scrub.Error - if errors.As(err, &v) { - row, err = tr.generateScrubErrorRow(row, v) - } else if err == nil && row != nil { - continue - } - if row == nil || err != nil { - tr.MoveToDraining(scrub.UnwrapScrubError(err)) - break - } - - if outRow := tr.ProcessRowHelper(row); outRow != nil { - return outRow, nil - } - } - return nil, tr.DrainHelper() -} diff --git a/pkg/sql/rowexec/sorter.go b/pkg/sql/rowexec/sorter.go index 17394e5fa0ad..5124cbd7d562 100644 --- a/pkg/sql/rowexec/sorter.go +++ b/pkg/sql/rowexec/sorter.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/optional" "github.com/cockroachdb/errors" @@ -386,7 +387,7 @@ func (s *sortTopKProcessor) ConsumerClosed() { type sortChunksProcessor struct { sorterBase - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // sortChunksProcessor accumulates rows that are equal on a prefix, until it // encounters a row that is greater. It stores that greater row in nextChunkRow diff --git a/pkg/sql/rowexec/stream_group_accumulator.go b/pkg/sql/rowexec/stream_group_accumulator.go index 37e48584a501..ad23030fae43 100644 --- a/pkg/sql/rowexec/stream_group_accumulator.go +++ b/pkg/sql/rowexec/stream_group_accumulator.go @@ -35,7 +35,7 @@ type streamGroupAccumulator struct { // curGroup maintains the rows accumulated in the current group. curGroup []rowenc.EncDatumRow - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc // leftoverRow is the first row of the next group. It's saved in the // accumulator after the current group is returned, so the accumulator can diff --git a/pkg/sql/rowexec/stream_merger.go b/pkg/sql/rowexec/stream_merger.go index 9ff0b8319b70..80b2527d072a 100644 --- a/pkg/sql/rowexec/stream_merger.go +++ b/pkg/sql/rowexec/stream_merger.go @@ -37,7 +37,7 @@ type streamMerger struct { // when we want NULL to be meaningful during equality, for example // during SCRUB secondary index checks. nullEquality bool - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc } func (sm *streamMerger) start(ctx context.Context) { @@ -111,7 +111,7 @@ func CompareEncDatumRowForMerge( lhs, rhs rowenc.EncDatumRow, leftOrdering, rightOrdering colinfo.ColumnOrdering, nullEquality bool, - da *rowenc.DatumAlloc, + da *tree.DatumAlloc, evalCtx *tree.EvalContext, ) (int, error) { if lhs == nil && rhs == nil { diff --git a/pkg/sql/rowexec/tablereader.go b/pkg/sql/rowexec/tablereader.go index 6b59d3ca879b..b139b384e3f5 100644 --- a/pkg/sql/rowexec/tablereader.go +++ b/pkg/sql/rowexec/tablereader.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/optional" "github.com/cockroachdb/errors" @@ -49,7 +50,7 @@ type tableReader struct { // fetcher wraps a row.Fetcher, allowing the tableReader to add a stat // collection layer. 
fetcher rowFetcher - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc scanStats execinfra.ScanStats @@ -103,12 +104,9 @@ func newTableReader( tr.batchBytesLimit = batchBytesLimit tr.maxTimestampAge = time.Duration(spec.MaxTimestampAgeNanos) - tableDesc := spec.BuildTableDescriptor() + tableDesc := flowCtx.TableDescriptor(&spec.Table) invertedColumn := tabledesc.FindInvertedColumn(tableDesc, spec.InvertedColumn) - cols := tableDesc.PublicColumns() - if spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic { - cols = tableDesc.DeletableColumns() - } + cols := tableDesc.DeletableColumns() columnIdxMap := catalog.ColumnIDToOrdinalMap(cols) resultTypes := catalog.ColumnTypesWithInvertedCol(cols, invertedColumn) @@ -152,10 +150,8 @@ func newTableReader( columnIdxMap, spec.Reverse, neededColumns, - spec.IsCheck, flowCtx.EvalCtx.Mon, &tr.alloc, - spec.Visibility, spec.LockingStrength, spec.LockingWaitPolicy, spec.HasSystemColumns, diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go index 013dd1f16322..d368c69d8dfe 100644 --- a/pkg/sql/rowexec/tablereader_test.go +++ b/pkg/sql/rowexec/tablereader_test.go @@ -73,7 +73,7 @@ func TestTableReader(t *testing.T) { makeIndexSpan := func(start, end int) roachpb.Span { var span roachpb.Span - prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, td, td.PublicNonPrimaryIndexes()[0].GetID())) + prefix := roachpb.Key(rowenc.MakeIndexKeyPrefix(keys.SystemSQLCodec, td.GetID(), td.PublicNonPrimaryIndexes()[0].GetID())) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) diff --git a/pkg/sql/rowexec/utils_test.go b/pkg/sql/rowexec/utils_test.go index 45a7a5034dae..fffa1d97d290 100644 --- a/pkg/sql/rowexec/utils_test.go +++ b/pkg/sql/rowexec/utils_test.go @@ -16,6 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -25,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/util/stop" ) // runProcessorTest instantiates a processor with the provided spec, runs it @@ -38,6 +40,8 @@ func runProcessorTest( outputTypes []*types.T, expected rowenc.EncDatumRows, txn *kv.Txn, + stopper *stop.Stopper, + distSender *kvcoord.DistSender, ) { in := distsqlutils.NewRowBuffer(inputTypes, inputRows, distsqlutils.RowBufferArgs{}) out := &distsqlutils.RowBuffer{} @@ -46,7 +50,7 @@ func runProcessorTest( evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(context.Background()) flowCtx := execinfra.FlowCtx{ - Cfg: &execinfra.ServerConfig{Settings: st}, + Cfg: &execinfra.ServerConfig{Settings: st, Stopper: stopper, DistSender: distSender}, EvalCtx: &evalCtx, Txn: txn, } diff --git a/pkg/sql/rowexec/values_test.go b/pkg/sql/rowexec/values_test.go index e4c0055be3f9..9b5324224c36 100644 --- a/pkg/sql/rowexec/values_test.go +++ b/pkg/sql/rowexec/values_test.go @@ -72,7 +72,7 @@ func TestValuesProcessor(t *testing.T) { t.Fatalf("incorrect number of rows %d, expected %d", len(res), numRows) } - var a rowenc.DatumAlloc + var a tree.DatumAlloc for i := 0; i < 
numRows; i++ { if len(res[i]) != numCols { t.Fatalf("row %d incorrect length %d, expected %d", i, len(res[i]), numCols) diff --git a/pkg/sql/rowexec/windower.go b/pkg/sql/rowexec/windower.go index cd4b151a4095..ff62832c984c 100644 --- a/pkg/sql/rowexec/windower.go +++ b/pkg/sql/rowexec/windower.go @@ -64,7 +64,7 @@ type windower struct { inputDone bool inputTypes []*types.T outputTypes []*types.T - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc acc mon.BoundAccount diskMonitor *mon.BytesMonitor diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index 36762572b709..e2a63f681d7b 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -277,8 +277,10 @@ func newZigzagJoiner( post *execinfrapb.PostProcessSpec, output execinfra.RowReceiver, ) (*zigzagJoiner, error) { - // TODO(ajwerner): Utilize a cached copy of these tables. - tables := spec.BuildTableDescriptors() + tables := make([]catalog.TableDescriptor, len(spec.Tables)) + for i := range spec.Tables { + tables[i] = flowCtx.TableDescriptor(&spec.Tables[i]) + } if len(tables) != 2 { return nil, errors.AssertionFailedf("zigzag joins only of two tables (or indexes) are supported, %d requested", len(tables)) } @@ -383,7 +385,7 @@ type zigzagJoinerInfo struct { fetcher rowFetcher // rowsRead is the total number of rows that this fetcher read from disk. rowsRead int64 - alloc *rowenc.DatumAlloc + alloc *tree.DatumAlloc table catalog.TableDescriptor index catalog.Index indexTypes []*types.T @@ -427,18 +429,22 @@ func (z *zigzagJoiner) setupInfo( z.side = side info := z.infos[side] - info.alloc = &rowenc.DatumAlloc{} + info.alloc = &tree.DatumAlloc{} info.table = tables[side] info.eqColumns = spec.EqColumns[side].Columns indexOrdinal := spec.IndexOrdinals[side] info.index = info.table.ActiveIndexes()[indexOrdinal] - var columnIDs []descpb.ColumnID - columnIDs, info.indexDirs = catalog.FullIndexColumnIDs(info.index) - info.indexTypes = make([]*types.T, len(columnIDs)) + info.indexDirs = info.table.IndexFullColumnDirections(info.index) + columns := info.table.IndexFullColumns(info.index) + info.indexTypes = make([]*types.T, len(columns)) columnTypes := catalog.ColumnTypes(info.table.PublicColumns()) colIdxMap := catalog.ColumnIDToOrdinalMap(info.table.PublicColumns()) - for i, columnID := range columnIDs { + for i, col := range columns { + if col == nil { + continue + } + columnID := col.GetID() if info.index.GetType() == descpb.IndexDescriptor_INVERTED && columnID == info.index.InvertedColumnID() { // Inverted key columns have type Bytes. @@ -458,7 +464,7 @@ func (z *zigzagJoiner) setupInfo( // Add the fixed columns. for i := 0; i < len(info.fixedValues); i++ { - neededCols.Add(colIdxMap.GetDefault(columnIDs[i])) + neededCols.Add(colIdxMap.GetDefault(columns[i].GetID())) } // Add the equality columns. @@ -483,10 +489,8 @@ func (z *zigzagJoiner) setupInfo( catalog.ColumnIDToOrdinalMap(info.table.PublicColumns()), false, /* reverse */ neededCols, - false, /* check */ flowCtx.EvalCtx.Mon, info.alloc, - execinfra.ScanVisibilityPublic, // NB: zigzag joins are disabled when a row-level locking clause is // supplied, so there is no locking strength on *ZigzagJoinerSpec. 
descpb.ScanLockingStrength_FOR_NONE, @@ -504,7 +508,7 @@ func (z *zigzagJoiner) setupInfo( info.fetcher = &fetcher } - info.prefix = rowenc.MakeIndexKeyPrefix(flowCtx.Codec(), info.table, info.index.GetID()) + info.prefix = rowenc.MakeIndexKeyPrefix(flowCtx.Codec(), info.table.GetID(), info.index.GetID()) span, err := z.produceSpanFromBaseRow() if err != nil { @@ -718,7 +722,7 @@ func (z *zigzagJoiner) matchBase(curRow rowenc.EncDatumRow, side int) (bool, err } // Compare the equality columns of the baseRow to that of the curRow. - da := &rowenc.DatumAlloc{} + da := &tree.DatumAlloc{} cmp, err := prevEqDatums.Compare(eqColTypes, da, ordering, z.FlowCtx.EvalCtx, curEqDatums) if err != nil { return false, err @@ -866,7 +870,7 @@ func (z *zigzagJoiner) nextRow(ctx context.Context, txn *kv.Txn) (rowenc.EncDatu if err != nil { return nil, err } - da := &rowenc.DatumAlloc{} + da := &tree.DatumAlloc{} cmp, err := prevEqCols.Compare(eqColTypes, da, ordering, z.FlowCtx.EvalCtx, currentEqCols) if err != nil { return nil, err diff --git a/pkg/sql/rowflow/BUILD.bazel b/pkg/sql/rowflow/BUILD.bazel index bda53523c7f8..2ffd09dc319b 100644 --- a/pkg/sql/rowflow/BUILD.bazel +++ b/pkg/sql/rowflow/BUILD.bazel @@ -38,6 +38,7 @@ go_test( "routers_test.go", ], embed = [":rowflow"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/keys", diff --git a/pkg/sql/rowflow/input_sync.go b/pkg/sql/rowflow/input_sync.go index 7cfda237794a..26861eb72f25 100644 --- a/pkg/sql/rowflow/input_sync.go +++ b/pkg/sql/rowflow/input_sync.go @@ -114,7 +114,7 @@ type serialOrderedSynchronizer struct { // err can be set by the Less function (used by the heap implementation) err error - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // metadata is accumulated from all the sources and is passed on as soon as // possible. diff --git a/pkg/sql/rowflow/routers.go b/pkg/sql/rowflow/routers.go index 558298bff647..0ee4bbd52d92 100644 --- a/pkg/sql/rowflow/routers.go +++ b/pkg/sql/rowflow/routers.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/flowinfra" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -516,7 +517,7 @@ type hashRouter struct { hashCols []uint32 buffer []byte - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc } // rangeRouter is a router that assumes the keyColumn'th column of incoming @@ -527,7 +528,7 @@ type hashRouter struct { type rangeRouter struct { routerBase - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc // b is a temp storage location used during encoding b []byte encodings []execinfrapb.OutputRouterSpec_RangeRouterSpec_ColumnEncoding diff --git a/pkg/sql/rowflow/routers_test.go b/pkg/sql/rowflow/routers_test.go index def7cef05070..8d0e399ae67b 100644 --- a/pkg/sql/rowflow/routers_test.go +++ b/pkg/sql/rowflow/routers_test.go @@ -80,7 +80,7 @@ func TestRouters(t *testing.T) { const numRows = 200 rng, _ := randutil.NewTestRand() - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.NewTestingEvalContext(st) @@ -241,7 +241,7 @@ func TestRouters(t *testing.T) { case execinfrapb.OutputRouterSpec_BY_RANGE: // Verify each row is in the correct output stream. 
enc := testRangeRouterSpec.Encodings[0] - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc for bIdx := range rows { for _, row := range rows[bIdx] { data, err := row[enc.Column].Encode(types[enc.Column], &alloc, enc.Encoding, nil) @@ -790,7 +790,7 @@ func TestRouterDiskSpill(t *testing.T) { }, DiskMonitor: diskMonitor, } - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} extraMemMonitor := execinfra.NewTestMemMonitor(ctx, st) defer extraMemMonitor.Stop(ctx) diff --git a/pkg/sql/rowflow/row_based_flow.go b/pkg/sql/rowflow/row_based_flow.go index 281d70864e07..958b65161e45 100644 --- a/pkg/sql/rowflow/row_based_flow.go +++ b/pkg/sql/rowflow/row_based_flow.go @@ -285,7 +285,7 @@ func (f *rowBasedFlow) setupInputSyncs( // than processors that scan over tables get their inputs from here, so // this is a convenient place to do the hydration. Processors that scan // over tables will have their hydration performed in ProcessorBase.Init. - resolver := f.TypeResolverFactory.NewTypeResolver(f.EvalCtx.Txn) + resolver := f.NewTypeResolver(f.EvalCtx.Txn) if err := resolver.HydrateTypeSlice(ctx, is.ColumnTypes); err != nil { return nil, err } diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 9c4be63007ca..f5827d41a132 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -12,17 +12,13 @@ package sql import ( "context" - "fmt" "sync" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" - "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" @@ -48,11 +44,6 @@ type scanNode struct { desc catalog.TableDescriptor index catalog.Index - // Set if an index was explicitly specified. - specifiedIndex catalog.Index - // Set if the NO_INDEX_JOIN hint was given. - noIndexJoin bool - colCfg scanColumnsConfig // The table columns, possibly including ones currently in schema changes. // TODO(radu/knz): currently we always load the entire row from KV and only @@ -114,32 +105,16 @@ type scanNode struct { // scanColumnsConfig controls the "schema" of a scan node. type scanColumnsConfig struct { // wantedColumns contains all the columns are part of the scan node schema, - // in this order (with the caveat that the addUnwantedAsHidden flag below - // can add more columns). Non public columns can only be added if allowed - // by the visibility flag below. + // in this order. Must not be nil (even if empty). wantedColumns []tree.ColumnID - // wantedColumnsOrdinals contains the ordinals of all columns in - // wantedColumns. Note that if addUnwantedAsHidden flag is set, the hidden - // columns are not included here. - wantedColumnsOrdinals []uint32 - - // invertedColumn maps the column ID of the inverted column (if it exists) - // to the column type actually stored in the index. For example, the - // inverted column of an inverted index has type bytes, even though the - // column descriptor matches the source column (Geometry, Geography, JSON or - // Array). 
- invertedColumn *struct { - colID tree.ColumnID - typ *types.T - } - - // When set, the columns that are not in the wantedColumns list are added to - // the list of columns as hidden columns. - addUnwantedAsHidden bool - // If visibility is set to execinfra.ScanVisibilityPublicAndNotPublic, then - // mutation columns can be added to the list of columns. - visibility execinfrapb.ScanVisibility + // invertedColumnID/invertedColumnType are used to map the column ID of the + // inverted column (if it exists) to the column type actually stored in the + // index. For example, the inverted column of an inverted index has type + // bytes, even though the column descriptor matches the source column + // (Geometry, Geography, JSON or Array). + invertedColumnID tree.ColumnID + invertedColumnType *types.T } func (cfg scanColumnsConfig) assertValidReqOrdering(reqOrdering exec.OutputOrdering) error { @@ -198,102 +173,38 @@ func (n *scanNode) disableBatchLimit() { // Initializes a scanNode with a table descriptor. func (n *scanNode) initTable( - ctx context.Context, - p *planner, - desc catalog.TableDescriptor, - indexFlags *tree.IndexFlags, - colCfg scanColumnsConfig, + ctx context.Context, p *planner, desc catalog.TableDescriptor, colCfg scanColumnsConfig, ) error { n.desc = desc - if !p.skipSelectPrivilegeChecks { - if err := p.CheckPrivilege(ctx, n.desc, privilege.SELECT); err != nil { - return err - } - } - - if indexFlags != nil { - if err := n.lookupSpecifiedIndex(indexFlags); err != nil { - return err - } - } - // Check if any system columns are requested, as they need special handling. n.containsSystemColumns = scanContainsSystemColumns(&colCfg) - n.noIndexJoin = (indexFlags != nil && indexFlags.NoIndexJoin) return n.initDescDefaults(colCfg) } -func (n *scanNode) lookupSpecifiedIndex(indexFlags *tree.IndexFlags) error { - if indexFlags.Index != "" { - // Search index by name. - foundIndex, _ := n.desc.FindIndexWithName(string(indexFlags.Index)) - if foundIndex == nil || !foundIndex.Public() { - return errors.Errorf("index %q not found", tree.ErrString(&indexFlags.Index)) - } - n.specifiedIndex = foundIndex - } else if indexFlags.IndexID != 0 { - // Search index by ID. - foundIndex, _ := n.desc.FindIndexWithID(descpb.IndexID(indexFlags.IndexID)) - if foundIndex == nil || !foundIndex.Public() { - return errors.Errorf("index [%d] not found", indexFlags.IndexID) - } - n.specifiedIndex = foundIndex - } - return nil -} - // initColsForScan initializes cols according to desc and colCfg. func initColsForScan( desc catalog.TableDescriptor, colCfg scanColumnsConfig, ) (cols []catalog.Column, err error) { if colCfg.wantedColumns == nil { - return nil, errors.AssertionFailedf("unexpectedly wantedColumns is nil") + return nil, errors.AssertionFailedf("wantedColumns is nil") } - cols = make([]catalog.Column, 0, len(desc.DeletableColumns())) - for _, wc := range colCfg.wantedColumns { - id := descpb.ColumnID(wc) - col, err := desc.FindColumnWithID(id) + cols = make([]catalog.Column, len(colCfg.wantedColumns)) + for i, colID := range colCfg.wantedColumns { + col, err := desc.FindColumnWithID(colID) if err != nil { return cols, err } - if !col.IsSystemColumn() { - if colCfg.visibility != execinfra.ScanVisibilityPublic { - col = desc.ReadableColumns()[col.Ordinal()] - } else if !col.Public() { - return cols, fmt.Errorf("column-id \"%d\" does not exist", id) - } - } // If this is an inverted column, create a new descriptor with the // correct type. 
- if vc := colCfg.invertedColumn; vc != nil && vc.colID == wc && !vc.typ.Identical(col.GetType()) { + if colCfg.invertedColumnID == colID && !colCfg.invertedColumnType.Identical(col.GetType()) { col = col.DeepCopy() - col.ColumnDesc().Type = vc.typ - } - cols = append(cols, col) - } - - if colCfg.addUnwantedAsHidden { - for _, c := range desc.PublicColumns() { - found := false - for _, wc := range colCfg.wantedColumns { - if descpb.ColumnID(wc) == c.GetID() { - found = true - break - } - } - if !found { - // NB: we could amortize this allocation using a second slice, - // but addUnwantedAsHidden is only used by scrub, so doing so - // doesn't seem worth it. - col := c.DeepCopy() - col.ColumnDesc().Hidden = true - cols = append(cols, col) - } + col.ColumnDesc().Type = colCfg.invertedColumnType } + cols[i] = col } return cols, nil diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index fab00c2394ce..d082c76a4adb 100644 --- a/pkg/sql/scatter_test.go +++ b/pkg/sql/scatter_test.go @@ -21,11 +21,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/randgen" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" ) func TestScatterRandomizeLeases(t *testing.T) { @@ -125,6 +127,21 @@ func TestScatterResponse(t *testing.T) { tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") r := sqlutils.MakeSQLRunner(sqlDB) + + // Range split decisions happen asynchronously and in this test we check for + // the actual split boundaries. Wait until the table itself is split off + // into its own range. + testutils.SucceedsSoon(t, func() error { + row := r.QueryRow(t, `SELECT count(*) FROM crdb_internal.ranges_no_leases WHERE table_id = $1`, + tableDesc.GetID()) + var nRanges int + row.Scan(&nRanges) + if nRanges != 1 { + return errors.Newf("expected to find single range for table, found %d", nRanges) + } + return nil + }) + r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))") rows := r.Query(t, "ALTER TABLE test.t SCATTER") diff --git a/pkg/sql/schema_change_plan_node.go b/pkg/sql/schema_change_plan_node.go index c8dfedf2832d..aa62308f2d06 100644 --- a/pkg/sql/schema_change_plan_node.go +++ b/pkg/sql/schema_change_plan_node.go @@ -28,13 +28,25 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" + "github.com/cockroachdb/redact" ) +// FormatAstAsRedactableString implements scbuild.AstFormatter +func (p *planner) FormatAstAsRedactableString( + statement tree.Statement, annotations *tree.Annotations, +) redact.RedactableString { + return formatStmtKeyAsRedactableString(p.getVirtualTabler(), + statement, + annotations) +} + // SchemaChange provides the planNode for the new schema changer. 
func (p *planner) SchemaChange(ctx context.Context, stmt tree.Statement) (planNode, bool, error) { + // TODO(ajwerner): Call featureflag.CheckEnabled appropriately. mode := p.extendedEvalCtx.SchemaChangerState.mode // When new schema changer is on we will not support it for explicit @@ -53,11 +65,12 @@ func (p *planner) SchemaChange(ctx context.Context, stmt tree.Statement) (planNo p.Descriptors(), p, p, + p, p.SessionData(), p.ExecCfg().Settings, scs.stmts, ) - outputNodes, err := scbuild.Build(ctx, deps, scs.state, stmt) + state, err := scbuild.Build(ctx, deps, scs.state, stmt) if scerrors.HasNotImplemented(err) && mode != sessiondatapb.UseNewSchemaChangerUnsafeAlways { return nil, false, nil @@ -70,9 +83,7 @@ func (p *planner) SchemaChange(ctx context.Context, stmt tree.Statement) (planNo } return nil, false, err } - return &schemaChangePlanNode{ - plannedState: outputNodes, - }, true, nil + return &schemaChangePlanNode{plannedState: state}, true, nil } // WaitForDescriptorSchemaChanges polls the specified descriptor (in separate @@ -125,17 +136,17 @@ func (p *planner) WaitForDescriptorSchemaChanges( // schemaChangePlanNode is the planNode utilized by the new schema changer to // perform all schema changes, unified in the new schema changer. type schemaChangePlanNode struct { - // plannedState contains the set of states produced by the builder combining + // plannedState contains the state produced by the builder combining // the nodes that existed preceding the current statement with the output of // the built current statement. - plannedState scpb.State + plannedState scpb.CurrentState } func (s *schemaChangePlanNode) startExec(params runParams) error { p := params.p scs := p.ExtendedEvalContext().SchemaChangerState runDeps := newSchemaChangerTxnRunDependencies( - p.User(), p.ExecCfg(), p.Txn(), p.Descriptors(), p.EvalContext(), scs.jobID, scs.stmts, + p.SessionData(), p.User(), p.ExecCfg(), p.Txn(), p.Descriptors(), p.EvalContext(), scs.jobID, scs.stmts, ) after, jobID, err := scrun.RunStatementPhase( params.ctx, p.ExecCfg().DeclarativeSchemaChangerTestingKnobs, runDeps, s.plannedState, @@ -149,6 +160,7 @@ func (s *schemaChangePlanNode) startExec(params runParams) error { } func newSchemaChangerTxnRunDependencies( + sessionData *sessiondata.SessionData, user security.SQLUsername, execCfg *ExecutorConfig, txn *kv.Txn, @@ -159,6 +171,7 @@ func newSchemaChangerTxnRunDependencies( ) scexec.Dependencies { return scdeps.NewExecutorDependencies( execCfg.Codec, + sessionData, txn, user, descriptors, @@ -171,6 +184,7 @@ func newSchemaChangerTxnRunDependencies( scdeps.NewNoopPeriodicProgressFlusher(), execCfg.IndexValidator, scdeps.NewPartitioner(execCfg.Settings, evalContext), + execCfg.CommentUpdaterFactory, NewSchemaChangerEventLogger(txn, execCfg, 1), schemaChangerJobID, stmts, diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 1bc829a48ba5..47f6314d9609 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -1214,10 +1215,10 @@ func (sc *SchemaChanger) done(ctx context.Context) error { return err } switch 
localityConfigToSwapTo.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_RegionalByTable_, - *descpb.TableDescriptor_LocalityConfig_Global_: + case *catpb.LocalityConfig_RegionalByTable_, + *catpb.LocalityConfig_Global_: scTable.PartitionAllBy = false - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: scTable.PartitionAllBy = true default: return errors.AssertionFailedf( @@ -1886,7 +1887,7 @@ type SchemaChangerTestingKnobs struct { // RunBeforeConstraintValidation is called just before starting the checks validation, // after setting the job status to validating. - RunBeforeConstraintValidation func() error + RunBeforeConstraintValidation func(constraints []catalog.ConstraintToUpdate) error // RunBeforeMutationReversal runs at the beginning of maybeReverseMutations. RunBeforeMutationReversal func(jobID jobspb.JobID) error @@ -2412,9 +2413,9 @@ func (sc *SchemaChanger) applyZoneConfigChangeForMutation( ) } switch lcSwap.NewLocalityConfig.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_Global_, - *descpb.TableDescriptor_LocalityConfig_RegionalByTable_: - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_Global_, + *catpb.LocalityConfig_RegionalByTable_: + case *catpb.LocalityConfig_RegionalByRow_: // Apply new zone configurations for all newly partitioned indexes. newIndexIDs := make([]descpb.IndexID, 0, pkSwap.NumNewIndexes()) _ = pkSwap.ForEachNewIndexIDs(func(id descpb.IndexID) error { diff --git a/pkg/sql/schema_changer_state.go b/pkg/sql/schema_changer_state.go index 8f35330bbce9..078f586465ae 100644 --- a/pkg/sql/schema_changer_state.go +++ b/pkg/sql/schema_changer_state.go @@ -19,7 +19,7 @@ import ( // SchemaChangerState is state associated with the new schema changer. type SchemaChangerState struct { mode sessiondatapb.NewSchemaChangerMode - state scpb.State + state scpb.CurrentState // jobID contains the ID of the schema changer job, if it is to be created. jobID jobspb.JobID // stmts contains the SQL statements involved in the schema change. 
This is diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 09b6bb0a4d7d..16eba544a6e7 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -5914,7 +5914,7 @@ func TestSchemaChangeJobRunningStatusValidation(t *testing.T) { var runBeforeConstraintValidation func() error params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ - RunBeforeConstraintValidation: func() error { + RunBeforeConstraintValidation: func(constraints []catalog.ConstraintToUpdate) error { return runBeforeConstraintValidation() }, }, @@ -5967,7 +5967,7 @@ func TestFKReferencesAddedOnlyOnceOnRetry(t *testing.T) { errorReturned := false params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ - RunBeforeConstraintValidation: func() error { + RunBeforeConstraintValidation: func(constraints []catalog.ConstraintToUpdate) error { return runBeforeConstraintValidation() }, }, @@ -7479,3 +7479,43 @@ func TestJobsWithoutMutationsAreCancelable(t *testing.T) { ).Scan(&id) require.Equal(t, scJobID, id) } + +func TestShardColumnConstraintSkipValidation(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + ctx := context.Background() + + constraintsToValidate := make(chan []catalog.ConstraintToUpdate, 1) + params, _ := tests.CreateTestServerParams() + params.Knobs = base.TestingKnobs{ + SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ + RunBeforeConstraintValidation: func(constraints []catalog.ConstraintToUpdate) error { + constraintsToValidate <- constraints + return nil + }, + }, + } + + s, sqlDB, _ := serverutils.StartServer(t, params) + defer s.Stopper().Stop(ctx) + tdb := sqlutils.MakeSQLRunner(sqlDB) + + tdb.Exec(t, ` +CREATE DATABASE t; +CREATE TABLE t.test(a INT PRIMARY KEY, b INT NOT NULL); +INSERT INTO t.test VALUES (1, 2); +`, + ) + + // Make sure non-shard column constraint is validated. + tdb.Exec(t, `ALTER TABLE t.test ADD CONSTRAINT check_b_positive CHECK (b > 0);`) + require.Len(t, <-constraintsToValidate, 1) + + // Make sure shard column constraint is not validated. 
+ tdb.Exec(t, ` +SET experimental_enable_hash_sharded_indexes = ON; +CREATE INDEX ON t.test (b) USING HASH WITH BUCKET_COUNT = 8; +`, + ) + require.Len(t, constraintsToValidate, 0) +} diff --git a/pkg/sql/schemachanger/end_to_end_test.go b/pkg/sql/schemachanger/end_to_end_test.go index a9431c6250b3..9bcf77200722 100644 --- a/pkg/sql/schemachanger/end_to_end_test.go +++ b/pkg/sql/schemachanger/end_to_end_test.go @@ -133,7 +133,7 @@ func waitForSchemaChangesToComplete(t *testing.T, tdb *sqlutils.SQLRunner) { func execStatementWithTestDeps( ctx context.Context, t *testing.T, deps *sctestdeps.TestState, stmt parser.Statement, ) { - state, err := scbuild.Build(ctx, deps, scpb.State{}, stmt.AST) + state, err := scbuild.Build(ctx, deps, scpb.CurrentState{}, stmt.AST) require.NoError(t, err, "error in builder") var jobID jobspb.JobID @@ -160,7 +160,7 @@ func execStatementWithTestDeps( progress := job.Progress.(jobspb.NewSchemaChangeProgress) const rollback = false err = scrun.RunSchemaChangesInJob( - ctx, deps.TestingKnobs(), deps.ClusterSettings(), deps, jobID, job.DescriptorIDs, details, progress, rollback, + ctx, deps.TestingKnobs(), deps.ClusterSettings(), deps, jobID, details, progress, rollback, ) require.NoError(t, err, "error in mock schema change job execution") deps.LogSideEffectf("# end %s", deps.Phase()) diff --git a/pkg/sql/schemachanger/scbuild/BUILD.bazel b/pkg/sql/schemachanger/scbuild/BUILD.bazel index b020c3f02c28..8f0f20b18793 100644 --- a/pkg/sql/schemachanger/scbuild/BUILD.bazel +++ b/pkg/sql/schemachanger/scbuild/BUILD.bazel @@ -3,14 +3,15 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "scbuild", srcs = [ + "ast_annotator.go", "build.go", "builder_state.go", "descriptor_reader.go", "event_log_state.go", "name_resolver.go", - "node_enqueuer_and_checker.go", "privilege_checker.go", "table_element_id_generator.go", + "target_enqueuer_and_checker.go", "tree_context_builder.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild", @@ -20,6 +21,7 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/faketreeeval", + "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/schemachanger/scbuild/internal/scbuildstmt", diff --git a/pkg/sql/schemachanger/scbuild/ast_annotator.go b/pkg/sql/schemachanger/scbuild/ast_annotator.go new file mode 100644 index 000000000000..8ee40739a2d1 --- /dev/null +++ b/pkg/sql/schemachanger/scbuild/ast_annotator.go @@ -0,0 +1,88 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package scbuild + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild/internal/scbuildstmt" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/errors" +) + +var _ scbuildstmt.TreeAnnotator = (*astAnnotator)(nil) + +// astAnnotator creates a copy of the AST that can be annotated or modified +// safely without impacting the original statement. 
+type astAnnotator struct { + statement tree.Statement + annotation tree.Annotations + nonExistentNames map[*tree.TableName]struct{} +} + +func newAstAnnotator(original tree.Statement) (*astAnnotator, error) { + // Clone the original tree by re-parsing the input back into an AST. + statement, err := parser.ParseOne(original.String()) + if err != nil { + return nil, err + } + return &astAnnotator{ + nonExistentNames: map[*tree.TableName]struct{}{}, + statement: statement.AST, + annotation: tree.MakeAnnotations(statement.NumAnnotations), + }, nil +} + +// GetStatement returns the cloned copy of the AST that is stored inside +// the annotator. +func (ann *astAnnotator) GetStatement() tree.Statement { + return ann.statement +} + +// GetAnnotations implements scbuildstmt.TreeAnnotator. +func (ann *astAnnotator) GetAnnotations() *tree.Annotations { + // Sanity: Validate the annotations before returning them. + return &ann.annotation +} + +// MarkNameAsNonExistent implements scbuildstmt.TreeAnnotator. +func (ann *astAnnotator) MarkNameAsNonExistent(name *tree.TableName) { + ann.nonExistentNames[name] = struct{}{} +} + +// SetUnresolvedNameAnnotation implements scbuildstmt.TreeAnnotator. +func (ann *astAnnotator) SetUnresolvedNameAnnotation( + unresolvedName *tree.UnresolvedObjectName, annotation interface{}, +) { + unresolvedName.SetAnnotation(&ann.annotation, annotation) +} + +// ValidateAnnotations validates that the expected modifications have been +// applied to the AST. After the build phase, all names should be fully resolved, +// either by directly modifying the AST or through annotations. +func (ann *astAnnotator) ValidateAnnotations() { + // Sanity: Goes through the entire AST and confirms that any table names that + // appear inside it are fully resolved, meaning that both the database and + // schema are known. + f := tree.NewFmtCtx( + tree.FmtAlwaysQualifyTableNames|tree.FmtMarkRedactionNode, + tree.FmtAnnotations(&ann.annotation), + tree.FmtReformatTableNames(func(ctx *tree.FmtCtx, name *tree.TableName) { + // Name was not found during lookup inside the builder. + if _, ok := ann.nonExistentNames[name]; ok { + return + } + if name.CatalogName == "" || name.SchemaName == "" { + panic(errors.AssertionFailedf("unresolved name inside annotated AST "+ + "(%v)", name.String())) + } + })) + f.FormatNode(ann.statement) +} diff --git a/pkg/sql/schemachanger/scbuild/build.go b/pkg/sql/schemachanger/scbuild/build.go index f1e238a348e3..9b2cb44c6796 100644 --- a/pkg/sql/schemachanger/scbuild/build.go +++ b/pkg/sql/schemachanger/scbuild/build.go @@ -24,15 +24,24 @@ import ( // The function takes an AST for a DDL statement and constructs targets // which represent schema changes to be performed. func Build( - ctx context.Context, dependencies Dependencies, initial scpb.State, n tree.Statement, -) (_ scpb.State, err error) { + ctx context.Context, dependencies Dependencies, initial scpb.CurrentState, n tree.Statement, +) (_ scpb.CurrentState, err error) { + initial = initial.DeepCopy() bs := newBuilderState(initial) els := newEventLogState(dependencies, initial, n) + // TODO(fqazi): The optimizer can end up already modifying the statement above + // to fully resolve names. We need to take this into account for CTAS/CREATE + // VIEW statements.
+ an, err := newAstAnnotator(n) + if err != nil { + return scpb.CurrentState{}, err + } b := buildCtx{ Context: ctx, Dependencies: dependencies, BuilderState: bs, EventLogState: els, + TreeAnnotator: an, } defer func() { if recErr := recover(); recErr != nil { @@ -43,12 +52,21 @@ func Build( } } }() - scbuildstmt.Process(b, n) - return scpb.State{ - Nodes: bs.output, + scbuildstmt.Process(b, an.GetStatement()) + an.ValidateAnnotations() + els.statements[len(els.statements)-1].RedactedStatement = + string(els.astFormatter.FormatAstAsRedactableString(an.GetStatement(), &an.annotation)) + ts := scpb.TargetState{ + Targets: make([]scpb.Target, len(bs.output)), Statements: els.statements, Authorization: els.authorization, - }, nil + } + current := make([]scpb.Status, len(bs.output)) + for i, e := range bs.output { + ts.Targets[i] = scpb.MakeTarget(e.targetStatus, e.element, &e.metadata) + current[i] = e.currentStatus + } + return scpb.CurrentState{TargetState: ts, Current: current}, nil } // Export dependency interfaces. @@ -64,24 +82,43 @@ type ( // AuthorizationAccessor contains all privilege checking operations required // by the builder. AuthorizationAccessor = scbuildstmt.AuthorizationAccessor + + // AstFormatter contains operations for formatting out AST nodes into + // SQL statement text. + AstFormatter = scbuildstmt.AstFormatter ) +type elementState struct { + element scpb.Element + targetStatus, currentStatus scpb.Status + metadata scpb.TargetMetadata +} + // builderState is the backing struct for scbuildstmt.BuilderState interface. type builderState struct { // output contains the schema change targets that have been planned so far. - output []*scpb.Node + output []elementState } // newBuilderState constructs a builderState. -func newBuilderState(initial scpb.State) *builderState { - return &builderState{output: initial.Clone().Nodes} +func newBuilderState(initial scpb.CurrentState) *builderState { + bs := builderState{output: make([]elementState, len(initial.Current))} + for i, t := range initial.TargetState.Targets { + bs.output[i] = elementState{ + element: t.Element(), + targetStatus: t.TargetStatus, + currentStatus: initial.Current[i], + metadata: t.Metadata, + } + } + return &bs } // eventLogState is the backing struct for scbuildstmt.EventLogState interface. type eventLogState struct { // statements contains the statements in the schema changer state. - statements []*scpb.Statement + statements []scpb.Statement // authorization contains application and user names for the current session. authorization scpb.Authorization @@ -94,20 +131,24 @@ type eventLogState struct { // for any new elements added. This is used for detailed // tracking during cascade operations. sourceElementID *scpb.SourceElementID + + // astFormatter used to format AST elements as redactable strings. + astFormatter AstFormatter } // newEventLogState constructs an eventLogState. 
func newEventLogState( - d scbuildstmt.Dependencies, initial scpb.State, n tree.Statement, + d scbuildstmt.Dependencies, initial scpb.CurrentState, n tree.Statement, ) *eventLogState { - stmts := initial.Clone().Statements + stmts := initial.Statements els := eventLogState{ - statements: append(stmts, &scpb.Statement{ - Statement: n.String(), + statements: append(stmts, scpb.Statement{ + Statement: n.String(), + StatementTag: n.StatementTag(), }), authorization: scpb.Authorization{ AppName: d.SessionData().ApplicationName, - Username: d.SessionData().SessionUser().Normalized(), + UserName: d.SessionData().SessionUser().Normalized(), }, sourceElementID: new(scpb.SourceElementID), statementMetaData: scpb.TargetMetadata{ @@ -115,6 +156,7 @@ func newEventLogState( SubWorkID: 1, SourceElementID: 1, }, + astFormatter: d.AstFormatter(), } *els.sourceElementID = 1 return &els @@ -128,6 +170,7 @@ type buildCtx struct { scbuildstmt.Dependencies scbuildstmt.BuilderState scbuildstmt.EventLogState + scbuildstmt.TreeAnnotator } var _ scbuildstmt.BuildCtx = buildCtx{} @@ -138,6 +181,7 @@ func (b buildCtx) WithNewSourceElementID() scbuildstmt.BuildCtx { Context: b.Context, Dependencies: b.Dependencies, BuilderState: b.BuilderState, + TreeAnnotator: b.TreeAnnotator, EventLogState: b.EventLogStateWithNewSourceElementID(), } } diff --git a/pkg/sql/schemachanger/scbuild/builder_state.go b/pkg/sql/schemachanger/scbuild/builder_state.go index 92e1b63cbc7f..c2a4afeee7d4 100644 --- a/pkg/sql/schemachanger/scbuild/builder_state.go +++ b/pkg/sql/schemachanger/scbuild/builder_state.go @@ -19,24 +19,28 @@ import ( var _ scbuildstmt.BuilderState = (*builderState)(nil) -// AddNode implements the scbuildstmt.BuilderState interface. -func (b *builderState) AddNode( - status, targetStatus scpb.Status, elem scpb.Element, meta scpb.TargetMetadata, +// AddElementStatus implements the scbuildstmt.BuilderState interface. +func (b *builderState) AddElementStatus( + currentStatus, targetStatus scpb.Status, elem scpb.Element, meta scpb.TargetMetadata, ) { - for _, node := range b.output { - if screl.EqualElements(node.Element(), elem) { + for _, e := range b.output { + if screl.EqualElements(e.element, elem) { panic(errors.AssertionFailedf("element already present in builder state: %s", elem)) } } - b.output = append(b.output, &scpb.Node{ - Target: scpb.NewTarget(targetStatus, elem, &meta), - Status: status, + b.output = append(b.output, elementState{ + element: elem, + targetStatus: targetStatus, + currentStatus: currentStatus, + metadata: meta, }) } -// ForEachNode implements the scbuildstmt.BuilderState interface. -func (b *builderState) ForEachNode(fn func(status, targetStatus scpb.Status, elem scpb.Element)) { - for _, node := range b.output { - fn(node.Status, node.TargetStatus, node.Element()) +// ForEachElementStatus implements the scpb.ElementStatusIterator interface. 
+func (b *builderState) ForEachElementStatus( + fn func(status, targetStatus scpb.Status, elem scpb.Element), +) { + for _, es := range b.output { + fn(es.currentStatus, es.targetStatus, es.element) } } diff --git a/pkg/sql/schemachanger/scbuild/builder_test.go b/pkg/sql/schemachanger/scbuild/builder_test.go index d34416e09375..332604f45ece 100644 --- a/pkg/sql/schemachanger/scbuild/builder_test.go +++ b/pkg/sql/schemachanger/scbuild/builder_test.go @@ -131,16 +131,16 @@ func run( return "" case "build": - var outputNodes scpb.State + var output scpb.CurrentState withDependencies(t, s, tdb, func(deps scbuild.Dependencies) { stmts, err := parser.Parse(d.Input) require.NoError(t, err) for i := range stmts { - outputNodes, err = scbuild.Build(ctx, deps, outputNodes, stmts[i].AST) + output, err = scbuild.Build(ctx, deps, output, stmts[i].AST) require.NoError(t, err) } }) - return marshalNodes(t, outputNodes) + return marshalState(t, output) case "unimplemented": withDependencies(t, s, tdb, func(deps scbuild.Dependencies) { @@ -152,7 +152,7 @@ func run( alter, ok := stmt.AST.(*tree.AlterTable) require.Truef(t, ok, "not an ALTER TABLE statement: %s", stmt.SQL) - _, err = scbuild.Build(ctx, deps, scpb.State{}, alter) + _, err = scbuild.Build(ctx, deps, scpb.CurrentState{}, alter) require.Truef(t, scerrors.HasNotImplemented(err), "expected unimplemented, got %v", err) }) return "" @@ -176,21 +176,25 @@ func indentText(input string, tab string) string { return result.String() } -// marshalNodes marshals a scpb.State to YAML. -func marshalNodes(t *testing.T, nodes scpb.State) string { +// marshalState marshals a scpb.CurrentState to YAML. +func marshalState(t *testing.T, state scpb.CurrentState) string { var sortedEntries []string - for _, node := range nodes.Nodes { + for i, status := range state.Current { + node := screl.Node{ + Target: &state.Targets[i], + CurrentStatus: status, + } yaml, err := sctestutils.ProtoToYAML(node.Target.Element()) require.NoError(t, err) entry := strings.Builder{} entry.WriteString("- ") - entry.WriteString(screl.NodeString(node)) + entry.WriteString(screl.NodeString(&node)) entry.WriteString("\n") entry.WriteString(indentText("details:\n", " ")) entry.WriteString(indentText(yaml, " ")) sortedEntries = append(sortedEntries, entry.String()) } - // Sort the output buffer of nodes for determinism. + // Sort the output buffer of state for determinism. 
result := strings.Builder{} sort.Strings(sortedEntries) for _, entry := range sortedEntries { diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel index e010854bc4cf..197898538ae8 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//pkg/keys", "//pkg/settings/cluster", "//pkg/sql/catalog", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/schemaexpr", "//pkg/sql/catalog/seqexpr", @@ -44,6 +45,7 @@ go_library( "//pkg/sql/types", "//pkg/util/errorutil/unimplemented", "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", "@com_github_lib_pq//oid", ], ) diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go index 0faa731511e6..396cd10e78d5 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go @@ -50,17 +50,16 @@ func init() { func AlterTable(b BuildCtx, n *tree.AlterTable) { // Hoist the constraints to separate clauses because other code assumes that // that is how the commands will look. - // - // TODO(ajwerner): Clone the AST here because this mutates it in place and - // that is bad. n.HoistAddColumnConstraints() - tn := n.Table.ToTableName() - _, tbl := b.ResolveTable(n.Table, ResolveParams{ + prefix, tbl := b.ResolveTable(n.Table, ResolveParams{ IsExistenceOptional: n.IfExists, RequiredPrivilege: privilege.CREATE, }) + tn.ObjectNamePrefix = prefix.NamePrefix() + b.SetUnresolvedNameAnnotation(n.Table, &tn) if tbl == nil { + b.MarkNameAsNonExistent(&tn) return } if catalog.HasConcurrentSchemaChanges(tbl) { diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_relation.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_relation.go index a07a0ec8ce53..aaf042f30682 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_relation.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_relation.go @@ -36,9 +36,9 @@ import ( func enqueue(b BuildCtx, targetStatus scpb.Status, elem scpb.Element) { switch targetStatus { case scpb.Status_PUBLIC: - b.AddNode(scpb.Status_ABSENT, targetStatus, elem, b.TargetMetadata()) + b.AddElementStatus(scpb.Status_ABSENT, targetStatus, elem, b.TargetMetadata()) case scpb.Status_ABSENT: - b.AddNode(scpb.Status_PUBLIC, targetStatus, elem, b.TargetMetadata()) + b.AddElementStatus(scpb.Status_PUBLIC, targetStatus, elem, b.TargetMetadata()) } } @@ -232,6 +232,15 @@ func decomposeDescToElements(b BuildCtx, tbl catalog.Descriptor, targetStatus sc Privileges: user.Privileges, }) } + + // When dropping always generate an element for any descriptor related + // comments. 
+ if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.TableComment{ + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } func decomposeColumnIntoElements( @@ -300,6 +309,13 @@ func decomposeColumnIntoElements( }) } } + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.ColumnComment{ + TableID: tbl.GetID(), + ColumnID: column.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } // decomposeViewDescToElements converts view specific @@ -385,11 +401,34 @@ func decomposeTableDescToElements( primaryIndex, indexName := primaryIndexElemFromDescriptor(index.IndexDesc(), tbl) enqueue(b, targetStatus, primaryIndex) enqueue(b, targetStatus, indexName) + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.ConstraintComment{ + ConstraintType: scpb.ConstraintType_PrimaryKey, + ConstraintName: index.GetName(), + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } else { secondaryIndex, indexName := secondaryIndexElemFromDescriptor(index.IndexDesc(), tbl) enqueue(b, targetStatus, secondaryIndex) enqueue(b, targetStatus, indexName) + if targetStatus == scpb.Status_ABSENT && secondaryIndex.Unique { + enqueue(b, targetStatus, &scpb.ConstraintComment{ + ConstraintType: scpb.ConstraintType_PrimaryKey, + ConstraintName: index.GetName(), + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } + } + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.IndexComment{ + TableID: tbl.GetID(), + IndexID: index.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } } case tbl.IsSequence(): @@ -444,6 +483,14 @@ func decomposeTableDescToElements( IndexID: 0, // Invalid ID ColumnIDs: constraint.ColumnIDs, }) + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.ConstraintComment{ + ConstraintType: scpb.ConstraintType_UniqueWithoutIndex, + ConstraintName: constraint.Name, + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } // Add any check constraints next. for idx, constraint := range tbl.AllActiveAndInactiveChecks() { @@ -470,7 +517,27 @@ func decomposeTableDescToElements( ColumnIDs: constraint.ColumnIDs, Expr: constraint.Expr, }) + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.ConstraintComment{ + ConstraintType: scpb.ConstraintType_Check, + ConstraintName: constraint.Name, + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } } + // Clean up comments for foreign key constraints. + for _, fk := range tbl.AllActiveAndInactiveForeignKeys() { + if targetStatus == scpb.Status_ABSENT { + enqueue(b, targetStatus, &scpb.ConstraintComment{ + ConstraintType: scpb.ConstraintType_FK, + ConstraintName: fk.Name, + TableID: tbl.GetID(), + Comment: scpb.PlaceHolderComment, + }) + } + } + + // Add locality information. enqueue(b, targetStatus, &scpb.Locality{ DescriptorID: tbl.GetID(), diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_util.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_util.go index efabed5b3cf4..c97f34e7fd35 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_util.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/common_util.go @@ -115,7 +115,7 @@ func checkIfDescOrElementAreDropped(b BuildCtx, id descpb.ID) bool { // statement for example DROP TABLE A, B. Otherwise, the statement phase // should have marked the descriptor already.
matches := false - b.ForEachNode(func(_, targetStatus scpb.Status, elem scpb.Element) { + b.ForEachElementStatus(func(_, targetStatus scpb.Status, elem scpb.Element) { if matches { return } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_index.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_index.go index 27e74d4e5054..3aafa52a4846 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_index.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/create_index.go @@ -12,6 +12,7 @@ package scbuildstmt import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -28,7 +29,7 @@ import ( // CreateIndex implements CREATE INDEX. func CreateIndex(b BuildCtx, n *tree.CreateIndex) { - _, rel, idx := b.ResolveIndex(n.Table.ToUnresolvedObjectName(), n.Name, ResolveParams{ + prefix, rel, idx := b.ResolveIndex(n.Table.ToUnresolvedObjectName(), n.Name, ResolveParams{ IsExistenceOptional: true, RequiredPrivilege: privilege.CREATE, }) @@ -36,6 +37,9 @@ func CreateIndex(b BuildCtx, n *tree.CreateIndex) { // Table must exist. panic(sqlerrors.NewUndefinedRelationError(n.Table.ToUnresolvedObjectName())) } + // Mutate the AST to have the fully resolved name from above, which will be + // used for both event logging and errors. + n.Table.ObjectNamePrefix = prefix.NamePrefix() if idx != nil { if n.IfNotExists { return @@ -99,7 +103,7 @@ func CreateIndex(b BuildCtx, n *tree.CreateIndex) { // Setup the column ID. for _, columnNode := range n.Columns { // If the column was just added the new schema changer is not supported. - if b.HasNode(func(status, _ scpb.Status, elem scpb.Element) bool { + if b.HasElementStatus(func(status, _ scpb.Status, elem scpb.Element) bool { if status != scpb.Status_ABSENT { return false } @@ -201,7 +205,7 @@ func CreateIndex(b BuildCtx, n *tree.CreateIndex) { if err != nil { panic(err) } - secondaryIndex.ShardedDescriptor = &descpb.ShardedDescriptor{ + secondaryIndex.ShardedDescriptor = &catpb.ShardedDescriptor{ IsSharded: true, Name: shardColName, ShardBuckets: buckets, diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/dependencies.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/dependencies.go index 04d059379f69..69094bbd1fd9 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/dependencies.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/dependencies.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/redact" ) // BuildCtx wraps BuilderState and exposes various convenience methods for the @@ -33,12 +34,13 @@ type BuildCtx interface { Dependencies BuilderState EventLogState + TreeAnnotator TreeContextBuilder PrivilegeChecker DescriptorReader NameResolver - NodeEnqueuerAndChecker + TargetEnqueuerAndChecker TableElementIDGenerator // WithNewSourceElementID wraps BuilderStateWithNewSourceElementID in a @@ -64,6 +66,8 @@ type Dependencies interface { // Statements returns the statements behind this schema change. 
Statements() []string + + AstFormatter() AstFormatter } // CatalogReader should implement descriptor resolution, namespace lookups, and @@ -119,12 +123,10 @@ type AuthorizationAccessor interface { // its internal state to anything that ends up using it and only allowing // state changes via the provided methods. type BuilderState interface { + scpb.ElementStatusIterator - // AddNode adds a node into the NodeAccumulator. - AddNode(status, targetStatus scpb.Status, elem scpb.Element, meta scpb.TargetMetadata) - - // ForEachNode iterates over the accumulated notes in the NodeAccumulator. - ForEachNode(fn func(status, targetStatus scpb.Status, elem scpb.Element)) + // AddElementStatus adds an element into the BuilderState. + AddElementStatus(currentStatus, targetStatus scpb.Status, elem scpb.Element, meta scpb.TargetMetadata) } // EventLogState encapsulates the state of the metadata to decorate the eventlog @@ -225,9 +227,9 @@ type NameResolver interface { ) (catalog.ResolvedObjectPrefix, catalog.TableDescriptor, catalog.Index) } -// NodeEnqueuerAndChecker exposes convenient methods for enqueuing and checking -// nodes in the NodeAccumulator. -type NodeEnqueuerAndChecker interface { +// TargetEnqueuerAndChecker exposes convenient methods for enqueuing and checking +// nodes in the BuilderState. +type TargetEnqueuerAndChecker interface { // EnqueueAdd adds a node with a PUBLIC target status. // Panics if the element is already present. EnqueueAdd(elem scpb.Element) @@ -240,9 +242,9 @@ type NodeEnqueuerAndChecker interface { // panicking if the element is already present. EnqueueDropIfNotExists(elem scpb.Element) - // HasNode returns true iff the builder state has a node matching the provided - // filter function. - HasNode(filter func(status, targetStatus scpb.Status, elem scpb.Element) bool) bool + // HasElementStatus returns true iff the builder state has an element matching + // the provided filter function. + HasElementStatus(filter func(currentStatus, targetStatus scpb.Status, elem scpb.Element) bool) bool // HasTarget returns true iff the builder state has a node with an equal element // and the same target status, regardless of node status. @@ -268,3 +270,23 @@ type TableElementIDGenerator interface { // this table descriptor. NextIndexID(tbl catalog.TableDescriptor) descpb.IndexID } + +// AstFormatter provides interfaces for formatting AST nodes. +type AstFormatter interface { + // FormatAstAsRedactableString formats a tree.Statement into SQL with fully + // qualified names, where parts can be redacted. + FormatAstAsRedactableString(statement tree.Statement, annotations *tree.Annotations) redact.RedactableString +} + +// TreeAnnotator provides interfaces to be able to modify the AST safely, +// by providing a copy and support for adding annotations. +type TreeAnnotator interface { + + // SetUnresolvedNameAnnotation sets an annotation on an unresolved object name. + SetUnresolvedNameAnnotation(unresolvedName *tree.UnresolvedObjectName, ann interface{}) + + // MarkNameAsNonExistent indicates that a table name is non-existent + // in the AST, which will cause it to skip full namespace resolution + // validation. 
+ MarkNameAsNonExistent(name *tree.TableName) +} diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_database.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_database.go index 5d8dd4e1970e..0e2cd1dd6607 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_database.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_database.go @@ -48,7 +48,7 @@ func DropDatabase(b BuildCtx, n *tree.DropDatabase) { // Block drops if cascade is not set. if n.DropBehavior == tree.DropRestrict && (nodeAdded || !schemaDroppedIDs.Empty()) { panic(pgerror.Newf(pgcode.DependentObjectsStillExist, - "database %q has a non-empty schema %q and CASCADE was not specified", db.GetName(), schema.GetName())) + "database %q is not empty and RESTRICT was specified", db.GetName())) } // If no schema exists to depend on, then depend on dropped IDs if !nodeAdded { @@ -76,4 +76,8 @@ func DropDatabase(b BuildCtx, n *tree.DropDatabase) { DatabaseID: db.GetID(), DependentObjects: dropIDs.Ordered(), }) + b.EnqueueDrop(&scpb.DatabaseComment{ + DatabaseID: db.GetID(), + Comment: scpb.PlaceHolderComment, + }) } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_schema.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_schema.go index 0d605078e973..3c70341b4ff3 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_schema.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_schema.go @@ -42,11 +42,6 @@ func dropSchema( behavior tree.DropBehavior, ) (nodeAdded bool, dropIDs catalog.DescriptorIDSet) { descsThatNeedElements := catalog.DescriptorIDSet{} - // For non-user defined schemas, another check will be - // done each object as we go to drop them. - if sc.SchemaKind() == catalog.SchemaUserDefined { - b.MustOwn(sc) - } _, objectIDs := b.CatalogReader().ReadObjectNamesAndIDs(b, db, sc) for _, id := range objectIDs { // For dependency tracking we will still track that these elements were @@ -98,6 +93,10 @@ func dropSchema( DatabaseID: sc.GetParentID(), SchemaID: sc.GetID(), }) + b.EnqueueDrop(&scpb.SchemaComment{ + SchemaID: sc.GetID(), + Comment: scpb.PlaceHolderComment, + }) return true, dropIDs } panic(errors.AssertionFailedf("unexpected sc kind %q for sc %q (%d)", diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_sequence.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_sequence.go index 29533cb160ce..c014384c36d9 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_sequence.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_sequence.go @@ -23,14 +23,19 @@ import ( // DropSequence implements DROP SEQUENCE. func DropSequence(b BuildCtx, n *tree.DropSequence) { - for _, name := range n.Names { - _, seq := b.ResolveSequence(name.ToUnresolvedObjectName(), ResolveParams{ + for idx := range n.Names { + name := &n.Names[idx] + prefix, seq := b.ResolveSequence(name.ToUnresolvedObjectName(), ResolveParams{ IsExistenceOptional: n.IfExists, RequiredPrivilege: privilege.DROP, }) if seq == nil { + b.MarkNameAsNonExistent(name) continue } + // Mutate the AST to have the fully resolved name from above, which will be + // used for both event logging and errors. 
+ name.ObjectNamePrefix = prefix.NamePrefix() dropSequence(b, seq, n.DropBehavior) b.IncrementSubWorkID() } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_table.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_table.go index 8fe45c625ceb..f20aac72ec34 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_table.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_table.go @@ -28,14 +28,19 @@ func DropTable(b BuildCtx, n *tree.DropTable) { } // Find the table first. tables := make([]tblDropCtx, 0, len(n.Names)) - for _, name := range n.Names { - _, tbl := b.ResolveTable(name.ToUnresolvedObjectName(), ResolveParams{ + for idx := range n.Names { + name := &n.Names[idx] + prefix, tbl := b.ResolveTable(name.ToUnresolvedObjectName(), ResolveParams{ IsExistenceOptional: n.IfExists, RequiredPrivilege: privilege.DROP, }) if tbl == nil { + b.MarkNameAsNonExistent(name) continue } + // Mutate the AST to have the fully resolved name from above, which will be + // used for both event logging and errors. + name.ObjectNamePrefix = prefix.NamePrefix() // Only decompose the tables first into elements, next we will check for // dependent objects, in case they are all dropped *together*. newCtx := dropTableBasic(b, tbl) @@ -81,8 +86,8 @@ func dropTableDependents(b BuildCtx, tbl catalog.TableDescriptor, behavior tree. onErrPanic(err) return pgerror.Newf( - pgcode.DependentObjectsStillExist, "cannot drop table %q because view %q depends on it", - name, depViewName) + pgcode.DependentObjectsStillExist, "cannot drop relation %q because view %q depends on it", + name.Object(), depViewName.Object()) } dropView(c, dependentDesc, behavior) return nil @@ -119,14 +124,36 @@ func dropTableDependents(b BuildCtx, tbl catalog.TableDescriptor, behavior tree. ReferenceColumns: fk.OriginColumns, }) }) - // Detect any sequence ownerships and clean them up no cascade - // is required. + // Detect any sequence ownerships and clean them up; when CASCADE is + // not specified, ensure that no other objects depend on the owned + // sequence before dropping it.
+ cleanSequenceOwnedBy := func(seq catalog.TableDescriptor) { + dropSequence(b, seq, tree.DropCascade) + if behavior == tree.DropCascade { + return + } + scpb.ForEachRelationDependedOnBy(c, func( + status scpb.Status, + targetStatus scpb.Status, + depBy *scpb.RelationDependedOnBy, + ) { + if depBy.TableID != seq.GetID() { + return + } + if depBy.DependedOnBy == tbl.GetID() { + return + } + panic(pgerror.Newf( + pgcode.DependentObjectsStillExist, + "cannot drop table a because other objects depend on it")) + }) + } scpb.ForEachSequenceOwnedBy(c, func(_, _ scpb.Status, sequenceOwnedBy *scpb.SequenceOwnedBy) { if sequenceOwnedBy.OwnerTableID != tbl.GetID() { return } - sequence := c.MustReadTable(sequenceOwnedBy.SequenceID) - dropSequence(b, sequence, tree.DropCascade) + seq := c.MustReadTable(sequenceOwnedBy.SequenceID) + cleanSequenceOwnedBy(seq) }) } } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_type.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_type.go index 4c974798ff84..4512306f7c5e 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_type.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_type.go @@ -29,13 +29,17 @@ func DropType(b BuildCtx, n *tree.DropType) { panic(unimplemented.NewWithIssue(51480, "DROP TYPE CASCADE is not yet supported")) } for _, name := range n.Names { - _, typ := b.ResolveType(name, ResolveParams{ + prefix, typ := b.ResolveType(name, ResolveParams{ IsExistenceOptional: n.IfExists, RequiredPrivilege: privilege.DROP, }) if typ == nil { continue } + // Mutate the AST to have the fully resolved name from above, which will be + // used for both event logging and errors. + tn := tree.MakeTypeNameWithPrefix(prefix.NamePrefix(), typ.GetName()) + b.SetUnresolvedNameAnnotation(name, &tn) // If the descriptor is already being dropped, nothing to do. if checkIfDescOrElementAreDropped(b, typ.GetID()) { return diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_view.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_view.go index efb537eaac67..5a5ba81c7c25 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_view.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/drop_view.go @@ -28,14 +28,19 @@ func DropView(b BuildCtx, n *tree.DropView) { buildCtx BuildCtx } views := make([]viewDropCtx, 0, len(n.Names)) - for _, name := range n.Names { - _, view := b.ResolveView(name.ToUnresolvedObjectName(), ResolveParams{ + for idx := range n.Names { + name := &n.Names[idx] + prefix, view := b.ResolveView(name.ToUnresolvedObjectName(), ResolveParams{ IsExistenceOptional: n.IfExists, RequiredPrivilege: privilege.DROP, }) if view == nil { + b.MarkNameAsNonExistent(name) continue } + // Mutate the AST to have the fully resolved name from above, which will be + // used for both event logging and errors. + name.ObjectNamePrefix = prefix.NamePrefix() if view.MaterializedView() && !n.IsMaterialized { panic(errors.WithHint(pgerror.Newf(pgcode.WrongObjectType, "%q is a materialized view", view.GetName()), "use the corresponding MATERIALIZED VIEW command")) @@ -92,13 +97,12 @@ func dropViewDependents(b BuildCtx, view catalog.TableDescriptor, behavior tree. 
depViewName, err := b.CatalogReader().GetQualifiedTableNameByID(b, int64(dep.DependedOnBy), tree.ResolveRequireViewDesc) onErrPanic(err) if dependentDesc.GetParentID() != view.GetParentID() { - panic(sqlerrors.NewDependentObjectErrorf("cannot drop view %q because view %q depends on it", - name.FQString(), depViewName.FQString())) - } else { - panic(errors.WithHintf( - sqlerrors.NewDependentObjectErrorf("cannot drop view %q because view %q depends on it", - name.Object(), depViewName.Object()), + panic(errors.WithHintf(sqlerrors.NewDependentObjectErrorf("cannot drop relation %q because view %q depends on it", + name.Object(), depViewName.FQString()), "you can drop %s instead.", depViewName.Object())) + } else { + panic(sqlerrors.NewDependentObjectErrorf("cannot drop relation %q because view %q depends on it", + name.Object(), depViewName.Object())) } } // Decompose and recursively attempt to drop. diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go index 464f9eb755ec..350c528c395d 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/process.go @@ -88,7 +88,6 @@ func Process(b BuildCtx, n tree.Statement) { if !info.IsFullySupported(b.EvalCtx().SessionData().NewSchemaChangerMode) { panic(scerrors.NotImplementedError(n)) } - // Next invoke the callback function, with the concrete types. fn := reflect.ValueOf(info.fn) in := []reflect.Value{reflect.ValueOf(b), reflect.ValueOf(n)} diff --git a/pkg/sql/schemachanger/scbuild/name_resolver.go b/pkg/sql/schemachanger/scbuild/name_resolver.go index 6833a932b8c2..cca47125012f 100644 --- a/pkg/sql/schemachanger/scbuild/name_resolver.go +++ b/pkg/sql/schemachanger/scbuild/name_resolver.go @@ -33,6 +33,9 @@ func (b buildCtx) ResolveDatabase( if p.IsExistenceOptional { return nil } + if string(name) == "" { + panic(pgerror.New(pgcode.Syntax, "empty database name")) + } panic(sqlerrors.NewUndefinedDatabaseError(name.String())) } if err := b.AuthorizationAccessor().CheckPrivilege(b, db, p.RequiredPrivilege); err != nil { @@ -50,7 +53,7 @@ func (b buildCtx) ResolveSchema( if p.IsExistenceOptional { return db, nil } - panic(sqlerrors.NewUndefinedSchemaError(name.String())) + panic(sqlerrors.NewUndefinedSchemaError(name.Schema())) } switch sc.SchemaKind() { case catalog.SchemaPublic, catalog.SchemaVirtual, catalog.SchemaTemporary: diff --git a/pkg/sql/schemachanger/scbuild/table_element_id_generator.go b/pkg/sql/schemachanger/scbuild/table_element_id_generator.go index 42b9b4b823f5..56951954a48d 100644 --- a/pkg/sql/schemachanger/scbuild/table_element_id_generator.go +++ b/pkg/sql/schemachanger/scbuild/table_element_id_generator.go @@ -55,7 +55,7 @@ func (b buildCtx) NextColumnFamilyID(tbl catalog.TableDescriptor) descpb.FamilyI // NextIndexID implements the scbuildstmt.TableElementIDGenerator interface. 
func (b buildCtx) NextIndexID(tbl catalog.TableDescriptor) descpb.IndexID { var maxAddedIndexID descpb.IndexID - b.ForEachNode(func(_, targetStatus scpb.Status, elem scpb.Element) { + b.ForEachElementStatus(func(_, targetStatus scpb.Status, elem scpb.Element) { if targetStatus != scpb.Status_PUBLIC || screl.GetDescID(elem) != tbl.GetID() { return } diff --git a/pkg/sql/schemachanger/scbuild/node_enqueuer_and_checker.go b/pkg/sql/schemachanger/scbuild/target_enqueuer_and_checker.go similarity index 55% rename from pkg/sql/schemachanger/scbuild/node_enqueuer_and_checker.go rename to pkg/sql/schemachanger/scbuild/target_enqueuer_and_checker.go index 10a50b69d33e..0cb980de5cc7 100644 --- a/pkg/sql/schemachanger/scbuild/node_enqueuer_and_checker.go +++ b/pkg/sql/schemachanger/scbuild/target_enqueuer_and_checker.go @@ -16,13 +16,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" ) -var _ scbuildstmt.NodeEnqueuerAndChecker = buildCtx{} +var _ scbuildstmt.TargetEnqueuerAndChecker = buildCtx{} -// HasNode implements the scbuildstmt.NodeEnqueuerAndChecker interface. -func (b buildCtx) HasNode( +// HasElementStatus implements the scbuildstmt.TargetEnqueuerAndChecker interface. +func (b buildCtx) HasElementStatus( filter func(status, targetStatus scpb.Status, elem scpb.Element) bool, ) (found bool) { - b.ForEachNode(func(status, targetStatus scpb.Status, elem scpb.Element) { + b.ForEachElementStatus(func(status, targetStatus scpb.Status, elem scpb.Element) { if filter(status, targetStatus, elem) { found = true } @@ -30,35 +30,35 @@ func (b buildCtx) HasNode( return found } -// HasTarget implements the scbuildstmt.NodeEnqueuerAndChecker interface. +// HasTarget implements the scbuildstmt.TargetEnqueuerAndChecker interface. func (b buildCtx) HasTarget(targetStatus scpb.Status, elem scpb.Element) (found bool) { - return b.HasNode(func(_, ts scpb.Status, e scpb.Element) bool { + return b.HasElementStatus(func(_, ts scpb.Status, e scpb.Element) bool { return ts == targetStatus && screl.EqualElements(e, elem) }) } -// HasElement implements the scbuildstmt.NodeEnqueuerAndChecker interface. +// HasElement implements the scbuildstmt.TargetEnqueuerAndChecker interface. func (b buildCtx) HasElement(elem scpb.Element) bool { - return b.HasNode(func(_, _ scpb.Status, e scpb.Element) bool { + return b.HasElementStatus(func(_, _ scpb.Status, e scpb.Element) bool { return screl.EqualElements(e, elem) }) } -// EnqueueAdd implements the scbuildstmt.NodeEnqueuerAndChecker interface. +// EnqueueAdd implements the scbuildstmt.TargetEnqueuerAndChecker interface. func (b buildCtx) EnqueueAdd(elem scpb.Element) { - b.AddNode(scpb.Status_ABSENT, scpb.Status_PUBLIC, elem, b.TargetMetadata()) + b.AddElementStatus(scpb.Status_ABSENT, scpb.Status_PUBLIC, elem, b.TargetMetadata()) } -// EnqueueDrop implements the scbuildstmt.NodeEnqueuerAndChecker interface. +// EnqueueDrop implements the scbuildstmt.TargetEnqueuerAndChecker interface. func (b buildCtx) EnqueueDrop(elem scpb.Element) { - b.AddNode(scpb.Status_PUBLIC, scpb.Status_ABSENT, elem, b.TargetMetadata()) + b.AddElementStatus(scpb.Status_PUBLIC, scpb.Status_ABSENT, elem, b.TargetMetadata()) } -// EnqueueDropIfNotExists implements the scbuildstmt.NodeEnqueuerAndChecker +// EnqueueDropIfNotExists implements the scbuildstmt.TargetEnqueuerAndChecker // interface. 
func (b buildCtx) EnqueueDropIfNotExists(elem scpb.Element) { if b.HasTarget(scpb.Status_ABSENT, elem) { return } - b.AddNode(scpb.Status_PUBLIC, scpb.Status_ABSENT, elem, b.TargetMetadata()) + b.AddElementStatus(scpb.Status_PUBLIC, scpb.Status_ABSENT, elem, b.TargetMetadata()) } diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_database b/pkg/sql/schemachanger/scbuild/testdata/drop_database index 7deb0449add9..b346850e7532 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_database +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_database @@ -122,6 +122,42 @@ DROP DATABASE db1 CASCADE width: 64 usesSequenceIds: - 57 +- [[ColumnComment:{DescID: 59, ColumnID: 1}, ABSENT], PUBLIC] + details: + columnId: 1 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 2}, ABSENT], PUBLIC] + details: + columnId: 2 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 3}, ABSENT], PUBLIC] + details: + columnId: 3 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 60, ColumnID: 1}, ABSENT], PUBLIC] + details: + columnId: 1 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 +- [[ColumnComment:{DescID: 60, ColumnID: 2}, ABSENT], PUBLIC] + details: + columnId: 2 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 +- [[ColumnComment:{DescID: 60, ColumnID: 3}, ABSENT], PUBLIC] + details: + columnId: 3 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 - [[ColumnName:{DescID: 59, ColumnID: 1, Name: id}, ABSENT], PUBLIC] details: columnId: 1 @@ -152,12 +188,31 @@ DROP DATABASE db1 CASCADE columnId: 3 name: val tableId: 60 +- [[ConstraintComment:{DescID: 59, ConstraintType: PrimaryKey, Name: t1_pkey}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: t1_pkey + constraintType: PrimaryKey + tableId: 59 +- [[ConstraintComment:{DescID: 60, ConstraintType: PrimaryKey, Name: t1_pkey}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: t1_pkey + constraintType: PrimaryKey + tableId: 60 - [[Database:{DescID: 54}, ABSENT], PUBLIC] details: databaseId: 54 dependentObjects: - 55 - 56 +- [[DatabaseComment:{DescID: 54}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + databaseId: 54 - [[DatabaseSchemaEntry:{DescID: 54, ReferencedDescID: 55}, ABSENT], PUBLIC] details: databaseId: 54 @@ -180,6 +235,18 @@ DROP DATABASE db1 CASCADE tableId: 60 usesSequenceIDs: - 57 +- [[IndexComment:{DescID: 59, IndexID: 1}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + indexId: 1 + tableId: 59 +- [[IndexComment:{DescID: 60, IndexID: 1}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + indexId: 1 + tableId: 60 - [[IndexName:{DescID: 59, IndexID: 1, Name: t1_pkey}, 
ABSENT], PUBLIC] details: indexId: 1 @@ -406,6 +473,16 @@ DROP DATABASE db1 CASCADE - 66 - 67 schemaId: 56 +- [[SchemaComment:{DescID: 55}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + schemaId: 55 +- [[SchemaComment:{DescID: 56}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + schemaId: 56 - [[Sequence:{DescID: 57}, ABSENT], PUBLIC] details: sequenceId: 57 @@ -418,6 +495,51 @@ DROP DATABASE db1 CASCADE - [[Table:{DescID: 60}, ABSENT], PUBLIC] details: tableId: 60 +- [[TableComment:{DescID: 57}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 +- [[TableComment:{DescID: 58}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 58 +- [[TableComment:{DescID: 59}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[TableComment:{DescID: 60}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 +- [[TableComment:{DescID: 61}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 61 +- [[TableComment:{DescID: 62}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 62 +- [[TableComment:{DescID: 63}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 63 +- [[TableComment:{DescID: 64}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 64 +- [[TableComment:{DescID: 67}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 67 - [[Type:{DescID: 65}, ABSENT], PUBLIC] details: typeId: 65 diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_schema b/pkg/sql/schemachanger/scbuild/testdata/drop_schema index bb72da409dc0..53bfb3492a81 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_schema +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_schema @@ -81,6 +81,24 @@ DROP SCHEMA defaultdb.SC1 CASCADE width: 64 usesSequenceIds: - 56 +- [[ColumnComment:{DescID: 57, ColumnID: 1}, ABSENT], PUBLIC] + details: + columnId: 1 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 +- [[ColumnComment:{DescID: 57, ColumnID: 2}, ABSENT], PUBLIC] + details: + columnId: 2 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 +- [[ColumnComment:{DescID: 57, ColumnID: 3}, ABSENT], PUBLIC] + details: + columnId: 3 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 - [[ColumnName:{DescID: 57, ColumnID: 1, Name: id}, ABSENT], PUBLIC] details: columnId: 1 @@ -96,6 +114,13 @@ DROP SCHEMA defaultdb.SC1 CASCADE columnId: 3 name: val tableId: 57 +- [[ConstraintComment:{DescID: 57, ConstraintType: PrimaryKey, 
Name: t1_pkey}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: t1_pkey + constraintType: PrimaryKey + tableId: 57 - [[DatabaseSchemaEntry:{DescID: 50, ReferencedDescID: 54}, ABSENT], PUBLIC] details: databaseId: 50 @@ -107,6 +132,12 @@ DROP SCHEMA defaultdb.SC1 CASCADE tableId: 57 usesSequenceIDs: - 56 +- [[IndexComment:{DescID: 57, IndexID: 1}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + indexId: 1 + tableId: 57 - [[IndexName:{DescID: 57, IndexID: 1, Name: t1_pkey}, ABSENT], PUBLIC] details: indexId: 1 @@ -295,12 +326,57 @@ DROP SCHEMA defaultdb.SC1 CASCADE - 63 - 64 schemaId: 54 +- [[SchemaComment:{DescID: 54}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + schemaId: 54 - [[Sequence:{DescID: 56}, ABSENT], PUBLIC] details: sequenceId: 56 - [[Table:{DescID: 57}, ABSENT], PUBLIC] details: tableId: 57 +- [[TableComment:{DescID: 56}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 56 +- [[TableComment:{DescID: 57}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 +- [[TableComment:{DescID: 58}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 58 +- [[TableComment:{DescID: 59}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[TableComment:{DescID: 60}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 +- [[TableComment:{DescID: 61}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 61 +- [[TableComment:{DescID: 64}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 64 +- [[TableComment:{DescID: 65}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 65 - [[Type:{DescID: 62}, ABSENT], PUBLIC] details: typeId: 62 diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_sequence b/pkg/sql/schemachanger/scbuild/testdata/drop_sequence index 8eb08c885ca3..ec48788b2935 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_sequence +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_sequence @@ -21,6 +21,11 @@ DROP SEQUENCE defaultdb.SQ1 CASCADE - [[Sequence:{DescID: 54}, ABSENT], PUBLIC] details: sequenceId: 54 +- [[TableComment:{DescID: 54}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 54 - [[UserPrivileges:{DescID: 54, Username: admin}, ABSENT], PUBLIC] details: descriptorId: 54 @@ -117,6 +122,11 @@ DROP SEQUENCE defaultdb.SQ1 CASCADE - [[Sequence:{DescID: 54}, ABSENT], PUBLIC] details: sequenceId: 54 +- [[TableComment:{DescID: 54}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing 
decomposition + tableId: 54 - [[UserPrivileges:{DescID: 54, Username: admin}, ABSENT], PUBLIC] details: descriptorId: 54 diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_table b/pkg/sql/schemachanger/scbuild/testdata/drop_table index ea5fda69b87b..a1b6d52c74b9 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_table +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_table @@ -111,6 +111,42 @@ DROP TABLE defaultdb.shipments CASCADE; oid: 100057 udtMetadata: arrayTypeOid: 100058 +- [[ColumnComment:{DescID: 59, ColumnID: 1}, ABSENT], PUBLIC] + details: + columnId: 1 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 2}, ABSENT], PUBLIC] + details: + columnId: 2 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 3}, ABSENT], PUBLIC] + details: + columnId: 3 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 4}, ABSENT], PUBLIC] + details: + columnId: 4 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 5}, ABSENT], PUBLIC] + details: + columnId: 5 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 +- [[ColumnComment:{DescID: 59, ColumnID: 6}, ABSENT], PUBLIC] + details: + columnId: 6 + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 59 - [[ColumnName:{DescID: 59, ColumnID: 1, Name: tracking_number}, ABSENT], PUBLIC] details: columnId: 1 @@ -161,6 +197,27 @@ DROP TABLE defaultdb.shipments CASCADE; columnId: 6 tableId: 59 typeId: 58 +- [[ConstraintComment:{DescID: 59, ConstraintType: FK, Name: fk_customers}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: fk_customers + constraintType: FK + tableId: 59 +- [[ConstraintComment:{DescID: 59, ConstraintType: FK, Name: fk_orders}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: fk_orders + constraintType: FK + tableId: 59 +- [[ConstraintComment:{DescID: 59, ConstraintType: PrimaryKey, Name: shipments_pkey}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + constraintName: shipments_pkey + constraintType: PrimaryKey + tableId: 59 - [[DefaultExpression:{DescID: 59, ColumnID: 1}, ABSENT], PUBLIC] details: columnId: 1 @@ -210,6 +267,12 @@ DROP TABLE defaultdb.shipments CASCADE; referenceColumns: - 4 referenceId: 59 +- [[IndexComment:{DescID: 59, IndexID: 1}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + indexId: 1 + tableId: 59 - [[IndexName:{DescID: 59, IndexID: 1, Name: shipments_pkey}, ABSENT], PUBLIC] details: indexId: 1 @@ -291,6 +354,21 @@ DROP TABLE defaultdb.shipments CASCADE; - [[Table:{DescID: 59}, ABSENT], PUBLIC] details: tableId: 59 +- [[TableComment:{DescID: 59}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing 
decomposition + tableId: 59 +- [[TableComment:{DescID: 60}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 60 +- [[TableComment:{DescID: 61}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 61 - [[UserPrivileges:{DescID: 59, Username: admin}, ABSENT], PUBLIC] details: descriptorId: 59 diff --git a/pkg/sql/schemachanger/scbuild/testdata/drop_view b/pkg/sql/schemachanger/scbuild/testdata/drop_view index f3bbec362fab..4cf1a6e054bd 100644 --- a/pkg/sql/schemachanger/scbuild/testdata/drop_view +++ b/pkg/sql/schemachanger/scbuild/testdata/drop_view @@ -26,6 +26,11 @@ DROP VIEW defaultdb.v1 details: dependedOn: 55 tableId: 54 +- [[TableComment:{DescID: 55}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 55 - [[UserPrivileges:{DescID: 55, Username: admin}, ABSENT], PUBLIC] details: descriptorId: 55 @@ -161,6 +166,31 @@ DROP VIEW defaultdb.v1 CASCADE columnID: 1 dependedOn: 61 tableId: 58 +- [[TableComment:{DescID: 55}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 55 +- [[TableComment:{DescID: 56}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 56 +- [[TableComment:{DescID: 57}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 57 +- [[TableComment:{DescID: 58}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 58 +- [[TableComment:{DescID: 61}, ABSENT], PUBLIC] + details: + comment: TODO(fqazi) Comments are not currently fetched from system.comments when + doing decomposition + tableId: 61 - [[UserPrivileges:{DescID: 55, Username: admin}, ABSENT], PUBLIC] details: descriptorId: 55 diff --git a/pkg/sql/schemachanger/scdeps/build_deps.go b/pkg/sql/schemachanger/scdeps/build_deps.go index 4736d3bebe54..c3767932c95b 100644 --- a/pkg/sql/schemachanger/scdeps/build_deps.go +++ b/pkg/sql/schemachanger/scdeps/build_deps.go @@ -35,6 +35,7 @@ func NewBuilderDependencies( descsCollection *descs.Collection, schemaResolver resolver.SchemaResolver, authAccessor scbuild.AuthorizationAccessor, + astFormatter scbuild.AstFormatter, sessionData *sessiondata.SessionData, settings *cluster.Settings, statements []string, @@ -48,6 +49,7 @@ func NewBuilderDependencies( sessionData: sessionData, settings: settings, statements: statements, + astFormatter: astFormatter, } } @@ -60,6 +62,7 @@ type buildDeps struct { sessionData *sessiondata.SessionData settings *cluster.Settings statements []string + astFormatter scbuild.AstFormatter } var _ scbuild.CatalogReader = (*buildDeps)(nil) @@ -223,3 +226,7 @@ func (d *buildDeps) ClusterSettings() *cluster.Settings { func (d *buildDeps) Statements() []string { return d.statements } + +func (d *buildDeps) AstFormatter() scbuild.AstFormatter { + return d.astFormatter +} diff --git a/pkg/sql/schemachanger/scdeps/exec_deps.go b/pkg/sql/schemachanger/scdeps/exec_deps.go index b01fb2d4f3d8..3e9a0e42c0bf 100644 --- a/pkg/sql/schemachanger/scdeps/exec_deps.go +++ b/pkg/sql/schemachanger/scdeps/exec_deps.go @@ -28,6 +28,7 
@@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/errors" ) @@ -45,6 +46,7 @@ type JobRegistry interface { // from the given arguments. func NewExecutorDependencies( codec keys.SQLCodec, + sessionData *sessiondata.SessionData, txn *kv.Txn, user security.SQLUsername, descsCollection *descs.Collection, @@ -54,6 +56,7 @@ func NewExecutorDependencies( backfillFlusher scexec.PeriodicProgressFlusher, indexValidator scexec.IndexValidator, partitioner scmutationexec.Partitioner, + commentUpdaterFactory scexec.CommentUpdaterFactory, eventLogger scexec.EventLogger, schemaChangerJobID jobspb.JobID, statements []string, @@ -70,10 +73,12 @@ func NewExecutorDependencies( }, backfiller: backfiller, backfillTracker: backfillTracker, + commentUpdaterFactory: commentUpdaterFactory, periodicProgressFlusher: backfillFlusher, statements: statements, partitioner: partitioner, user: user, + sessionData: sessionData, } } @@ -130,6 +135,7 @@ func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (stri tree.CommonLookupFlags{ Required: true, IncludeDropped: true, + AvoidLeased: true, }) if err != nil { return "", err @@ -144,6 +150,7 @@ func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (stri tree.CommonLookupFlags{ IncludeDropped: true, Required: true, + AvoidLeased: true, }) if err != nil { return "", err @@ -151,7 +158,9 @@ func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (stri schemaDesc, err := d.descsCollection.GetImmutableSchemaByID(ctx, d.txn, objectDesc.GetParentSchemaID(), tree.SchemaLookupFlags{ Required: true, - IncludeDropped: true}) + IncludeDropped: true, + AvoidLeased: true, + }) if err != nil { return "", err } @@ -168,6 +177,7 @@ func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (stri tree.CommonLookupFlags{ IncludeDropped: true, Required: true, + AvoidLeased: true, }) if err != nil { return "", err @@ -304,11 +314,13 @@ func (d *txnDeps) SetResumeSpans( type execDeps struct { txnDeps partitioner scmutationexec.Partitioner + commentUpdaterFactory scexec.CommentUpdaterFactory backfiller scexec.Backfiller backfillTracker scexec.BackfillTracker periodicProgressFlusher scexec.PeriodicProgressFlusher statements []string user security.SQLUsername + sessionData *sessiondata.SessionData } var _ scexec.Dependencies = (*execDeps)(nil) @@ -362,6 +374,11 @@ func (d *execDeps) User() security.SQLUsername { return d.user } +// CommentUpdater implements the scexec.Dependencies interface. +func (d *execDeps) CommentUpdater(ctx context.Context) scexec.CommentUpdater { + return d.commentUpdaterFactory.NewCommentUpdater(ctx, d.txn, d.sessionData) +} + // EventLoggerFactory constructs a new event logger with a txn. 
type EventLoggerFactory = func(*kv.Txn) scexec.EventLogger diff --git a/pkg/sql/schemachanger/scdeps/run_deps.go b/pkg/sql/schemachanger/scdeps/run_deps.go index 95c8fbeaaeac..ad1a2b72381b 100644 --- a/pkg/sql/schemachanger/scdeps/run_deps.go +++ b/pkg/sql/schemachanger/scdeps/run_deps.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" ) @@ -39,37 +40,42 @@ func NewJobRunDependencies( codec keys.SQLCodec, settings *cluster.Settings, indexValidator scexec.IndexValidator, + commentUpdaterFactory scexec.CommentUpdaterFactory, testingKnobs *scrun.TestingKnobs, statements []string, + sessionData *sessiondata.SessionData, ) scrun.JobRunDependencies { return &jobExecutionDeps{ - collectionFactory: collectionFactory, - db: db, - internalExecutor: internalExecutor, - backfiller: backfiller, - rangeCounter: rangeCounter, - eventLoggerFactory: eventLoggerFactory, - partitioner: partitioner, - jobRegistry: jobRegistry, - job: job, - codec: codec, - settings: settings, - testingKnobs: testingKnobs, - statements: statements, - indexValidator: indexValidator, + collectionFactory: collectionFactory, + db: db, + internalExecutor: internalExecutor, + backfiller: backfiller, + rangeCounter: rangeCounter, + eventLoggerFactory: eventLoggerFactory, + partitioner: partitioner, + jobRegistry: jobRegistry, + job: job, + codec: codec, + settings: settings, + testingKnobs: testingKnobs, + statements: statements, + indexValidator: indexValidator, + commentUpdaterFactory: commentUpdaterFactory, + sessionData: sessionData, } } type jobExecutionDeps struct { - collectionFactory *descs.CollectionFactory - db *kv.DB - internalExecutor sqlutil.InternalExecutor - eventLoggerFactory func(txn *kv.Txn) scexec.EventLogger - partitioner scmutationexec.Partitioner - backfiller scexec.Backfiller - rangeCounter RangeCounter - jobRegistry *jobs.Registry - job *jobs.Job + collectionFactory *descs.CollectionFactory + db *kv.DB + internalExecutor sqlutil.InternalExecutor + eventLoggerFactory func(txn *kv.Txn) scexec.EventLogger + partitioner scmutationexec.Partitioner + backfiller scexec.Backfiller + commentUpdaterFactory scexec.CommentUpdaterFactory + rangeCounter RangeCounter + jobRegistry *jobs.Registry + job *jobs.Job indexValidator scexec.IndexValidator @@ -77,6 +83,7 @@ type jobExecutionDeps struct { settings *cluster.Settings testingKnobs *scrun.TestingKnobs statements []string + sessionData *sessiondata.SessionData } var _ scrun.JobRunDependencies = (*jobExecutionDeps)(nil) @@ -113,6 +120,8 @@ func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc statements: d.statements, partitioner: d.partitioner, user: d.job.Payload().UsernameProto.Decode(), + commentUpdaterFactory: d.commentUpdaterFactory, + sessionData: d.sessionData, }) }) if err != nil { diff --git a/pkg/sql/schemachanger/scdeps/sctestdeps/BUILD.bazel b/pkg/sql/schemachanger/scdeps/sctestdeps/BUILD.bazel index bd85c3e832e2..5aec1b75223c 100644 --- a/pkg/sql/schemachanger/scdeps/sctestdeps/BUILD.bazel +++ b/pkg/sql/schemachanger/scdeps/sctestdeps/BUILD.bazel @@ -44,6 +44,7 @@ go_library( "//pkg/util/log/eventpb", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", "@com_github_lib_pq//oid", ], ) 
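The `sctestdeps` changes that follow implement the new comment-updating surface purely as side-effect logging. For orientation, the snippet below is a minimal sketch (not part of this patch) of a no-op updater and factory shaped the way `NewExecutorDependencies` expects its `commentUpdaterFactory` argument; the type names `noopCommentUpdater` and `noopCommentUpdaterFactory` are hypothetical, while the method signatures mirror the `scexec.CommentUpdater` and `scexec.CommentUpdaterFactory` interfaces added to `pkg/sql/schemachanger/scexec/dependencies.go` later in this patch.

```go
package scexecsketch // hypothetical package, for illustration only

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
)

// noopCommentUpdater discards every comment mutation, much like the
// TestState implementation below only records a side-effect string.
type noopCommentUpdater struct{}

var _ scexec.CommentUpdater = noopCommentUpdater{}

func (noopCommentUpdater) UpsertDescriptorComment(
	id int64, subID int64, commentType keys.CommentType, comment string,
) error {
	return nil
}

func (noopCommentUpdater) DeleteDescriptorComment(
	id int64, subID int64, commentType keys.CommentType,
) error {
	return nil
}

func (noopCommentUpdater) UpsertConstraintComment(
	desc catalog.TableDescriptor,
	schemaName string,
	constraintName string,
	constraintType scpb.ConstraintType,
	comment string,
) error {
	return nil
}

func (noopCommentUpdater) DeleteConstraintComment(
	desc catalog.TableDescriptor,
	schemaName string,
	constraintName string,
	constraintType scpb.ConstraintType,
) error {
	return nil
}

// noopCommentUpdaterFactory hands out the no-op updater regardless of the
// transaction or session data it is given.
type noopCommentUpdaterFactory struct{}

var _ scexec.CommentUpdaterFactory = noopCommentUpdaterFactory{}

func (noopCommentUpdaterFactory) NewCommentUpdater(
	ctx context.Context, txn *kv.Txn, sessionData *sessiondata.SessionData,
) scexec.CommentUpdater {
	return noopCommentUpdater{}
}
```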
diff --git a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go index 9c3a846a132a..78321f4061af 100644 --- a/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go +++ b/pkg/sql/schemachanger/scdeps/sctestdeps/test_deps.go @@ -816,16 +816,68 @@ func (s *TestState) IndexValidator() scexec.IndexValidator { return s } -// LogEvent implements scexec.EventLogger +// LogEvent implements scexec.EventLogger. func (s *TestState) LogEvent( - _ context.Context, descID descpb.ID, metadata scpb.ElementMetadata, event eventpb.EventPayload, + _ context.Context, + descID descpb.ID, + details eventpb.CommonSQLEventDetails, + event eventpb.EventPayload, ) error { s.LogSideEffectf("write %T to event log for descriptor #%d: %s", - event, descID, metadata.Statement) + event, descID, details.Statement) return nil } -// EventLogger implements scexec.Dependencies +// EventLogger implements scexec.Dependencies. func (s *TestState) EventLogger() scexec.EventLogger { return s } + +// UpsertDescriptorComment updates a comment associated with a schema +// object. +func (s *TestState) UpsertDescriptorComment( + id int64, subID int64, commentType keys.CommentType, comment string, +) error { + s.LogSideEffectf("upsert %s comment for descriptor #%d of type %s", + comment, id, commentType) + return nil +} + +// DeleteDescriptorComment deletes a comment for a given descriptor. +func (s *TestState) DeleteDescriptorComment( + id int64, subID int64, commentType keys.CommentType, +) error { + s.LogSideEffectf("delete comment for descriptor #%d of type %s", + id, commentType) + return nil +} + +// UpsertConstraintComment updates a comment associated with a constraint. +func (s *TestState) UpsertConstraintComment( + desc catalog.TableDescriptor, + _ string, + constraintName string, + constraintType scpb.ConstraintType, + comment string, +) error { + s.LogSideEffectf("upsert comment %s for constraint on #%d, name: %s, type: %s", + comment, desc.GetID(), constraintName, constraintType) + return nil +} + +// DeleteConstraintComment deletes a comment associated with a constraint. +func (s *TestState) DeleteConstraintComment( + desc catalog.TableDescriptor, + schemaName string, + constraintName string, + constraintType scpb.ConstraintType, +) error { + s.LogSideEffectf("delete comment for constraint on #%d, name: %s, type: %s", + desc.GetID(), constraintName, constraintType) + return nil +} + +// CommentUpdater implements scexec.Dependencies.
+func (s *TestState) CommentUpdater(ctx context.Context) scexec.CommentUpdater { + return s +} diff --git a/pkg/sql/schemachanger/scdeps/sctestdeps/test_state.go b/pkg/sql/schemachanger/scdeps/sctestdeps/test_state.go index ed2de40ecacf..a0162d286d79 100644 --- a/pkg/sql/schemachanger/scdeps/sctestdeps/test_state.go +++ b/pkg/sql/schemachanger/scdeps/sctestdeps/test_state.go @@ -18,10 +18,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/redact" ) // TestState is a backing struct used to implement all schema changer @@ -98,3 +101,22 @@ func (s *TestState) JobRecord(jobID jobspb.JobID) *jobs.Record { } return &s.jobs[idx] } + +// FormatAstAsRedactableString implements scbuild.AstFormatter +func (s *TestState) FormatAstAsRedactableString( + statement tree.Statement, ann *tree.Annotations, +) redact.RedactableString { + // Return the SQL back non-redacted and not fully resolved for the purposes + // of testing. + f := tree.NewFmtCtx( + tree.FmtAlwaysQualifyTableNames|tree.FmtMarkRedactionNode, + tree.FmtAnnotations(ann)) + f.FormatNode(statement) + formattedRedactableStatementString := f.CloseAndGetString() + return redact.RedactableString(formattedRedactableStatementString) +} + +// AstFormatter dummy formatter for AST nodes. +func (s *TestState) AstFormatter() scbuild.AstFormatter { + return s +} diff --git a/pkg/sql/schemachanger/scdeps/sctestutils/BUILD.bazel b/pkg/sql/schemachanger/scdeps/sctestutils/BUILD.bazel index 83acade0672c..523da4321541 100644 --- a/pkg/sql/schemachanger/scdeps/sctestutils/BUILD.bazel +++ b/pkg/sql/schemachanger/scdeps/sctestutils/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "//pkg/sql/protoreflect", "//pkg/sql/schemachanger/scbuild", "//pkg/sql/schemachanger/scdeps", - "//pkg/sql/schemachanger/scgraphviz", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", "//pkg/sql/schemachanger/scplan", diff --git a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go index de0bc642b7f3..0ece6d01fdae 100644 --- a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go +++ b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go @@ -26,7 +26,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan" @@ -64,6 +63,7 @@ func WithBuilderDependenciesFromTestServer( SessionData() *sessiondata.SessionData resolver.SchemaResolver scbuild.AuthorizationAccessor + scbuild.AstFormatter }) // For setting up a builder inside tests we will ensure that the new schema // changer will allow non-fully implemented operations. 
@@ -74,6 +74,7 @@ func WithBuilderDependenciesFromTestServer( planner.Descriptors(), planner, planner, + planner, planner.SessionData(), execCfg.Settings, nil, /* statements */ @@ -159,20 +160,21 @@ func ProtoDiff(a, b protoutil.Message, args DiffArgs) string { } // MakePlan is a convenient alternative to calling scplan.MakePlan in tests. -func MakePlan(t *testing.T, state scpb.State, phase scop.Phase) scplan.Plan { +func MakePlan(t *testing.T, state scpb.CurrentState, phase scop.Phase) scplan.Plan { plan, err := scplan.MakePlan(state, scplan.Params{ ExecutionPhase: phase, SchemaChangerJobIDSupplier: func() jobspb.JobID { return 1 }, }) - require.NoError(t, scgraphviz.DecorateErrorWithPlanDetails(err, plan)) + require.NoError(t, err) // Remove really long ops details that aren't that important anyway. for _, s := range plan.Stages { for _, o := range s.ExtraOps { if op, ok := o.(*scop.CreateDeclarativeSchemaChangerJob); ok { - op.State.Nodes = nil + op.TargetState.Targets = nil + op.Current = nil } if op, ok := o.(*scop.UpdateSchemaChangerJob); ok { - op.Statuses = nil + op.Current = nil } } } diff --git a/pkg/sql/schemachanger/scexec/BUILD.bazel b/pkg/sql/schemachanger/scexec/BUILD.bazel index c22c5062a353..0d9e0dce41a7 100644 --- a/pkg/sql/schemachanger/scexec/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/BUILD.bazel @@ -15,6 +15,8 @@ go_library( deps = [ "//pkg/jobs", "//pkg/jobs/jobspb", + "//pkg/keys", + "//pkg/kv", "//pkg/roachpb:with-mocks", "//pkg/security", "//pkg/sql/catalog", @@ -48,6 +50,7 @@ go_test( "//pkg/base", "//pkg/jobs", "//pkg/jobs/jobspb", + "//pkg/keys", "//pkg/kv", "//pkg/security", "//pkg/security/securitytest", @@ -66,9 +69,9 @@ go_test( "//pkg/sql/schemachanger/scdeps/sctestdeps", "//pkg/sql/schemachanger/scdeps/sctestutils", "//pkg/sql/schemachanger/scexec/scmutationexec", - "//pkg/sql/schemachanger/scgraphviz", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/sem/catid", # keep "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlutil", diff --git a/pkg/sql/schemachanger/scexec/dependencies.go b/pkg/sql/schemachanger/scexec/dependencies.go index 890378d7bf12..2396a4f218c8 100644 --- a/pkg/sql/schemachanger/scexec/dependencies.go +++ b/pkg/sql/schemachanger/scexec/dependencies.go @@ -15,6 +15,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -37,6 +39,7 @@ type Dependencies interface { IndexValidator() IndexValidator IndexSpanSplitter() IndexSpanSplitter EventLogger() EventLogger + CommentUpdater(ctx context.Context) CommentUpdater // Statements returns the statements behind this schema change. Statements() []string @@ -62,7 +65,12 @@ type Catalog interface { // EventLogger encapsulates the operations for emitting event log entries. type EventLogger interface { // LogEvent writes to the eventlog. 
- LogEvent(ctx context.Context, descID descpb.ID, metadata scpb.ElementMetadata, event eventpb.EventPayload) error + LogEvent( + ctx context.Context, + descID descpb.ID, + details eventpb.CommonSQLEventDetails, + event eventpb.EventPayload, + ) error } // CatalogChangeBatcher encapsulates batched updates to the catalog: descriptor @@ -235,3 +243,27 @@ type BackfillProgressFlusher interface { // FlushFractionCompleted writes out the fraction completed. FlushFractionCompleted(ctx context.Context) error } + +// CommentUpdater is used to update comments associated with schema objects. +type CommentUpdater interface { + // UpsertDescriptorComment updates a comment associated with a schema object. + UpsertDescriptorComment(id int64, subID int64, commentType keys.CommentType, comment string) error + + // DeleteDescriptorComment deletes a comment for schema object. + DeleteDescriptorComment(id int64, subID int64, commentType keys.CommentType) error + + //UpsertConstraintComment upserts a comment associated with a constraint. + UpsertConstraintComment(desc catalog.TableDescriptor, schemaName string, constraintName string, constraintType scpb.ConstraintType, comment string) error + + //DeleteConstraintComment deletes a comment associated with a constraint. + DeleteConstraintComment(desc catalog.TableDescriptor, schemaName string, constraintName string, constraintType scpb.ConstraintType) error +} + +// CommentUpdaterFactory is used to construct a CommentUpdater for a given +// transaction and context. +type CommentUpdaterFactory interface { + // NewCommentUpdater creates a new CommentUpdater. + NewCommentUpdater( + ctx context.Context, txn *kv.Txn, sessionData *sessiondata.SessionData, + ) CommentUpdater +} diff --git a/pkg/sql/schemachanger/scexec/exec_mutation.go b/pkg/sql/schemachanger/scexec/exec_mutation.go index e5e3545aa9cc..eb385c141650 100644 --- a/pkg/sql/schemachanger/scexec/exec_mutation.go +++ b/pkg/sql/schemachanger/scexec/exec_mutation.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -132,12 +133,38 @@ func executeDescriptorMutationOps(ctx context.Context, deps Dependencies, ops [] entries := eventLogEntriesForStatement(mvs.eventsByStatement[statementID]) for _, e := range entries { // TODO(postamar): batch these - if err := deps.EventLogger().LogEvent(ctx, e.id, *e.metadata, e.event); err != nil { + if err := deps.EventLogger().LogEvent(ctx, e.id, e.details, e.event); err != nil { + return err + } + } + } + commentUpdater := deps.CommentUpdater(ctx) + for _, comment := range mvs.commentsToUpdate { + if len(comment.comment) > 0 { + if err := commentUpdater.UpsertDescriptorComment( + comment.id, comment.subID, comment.commentType, comment.comment); err != nil { + return err + } + } else { + if err := commentUpdater.DeleteDescriptorComment( + comment.id, comment.subID, comment.commentType); err != nil { + return err + } + } + } + for _, comment := range mvs.constraintCommentsToUpdate { + if len(comment.comment) > 0 { + if err := commentUpdater.UpsertConstraintComment( + comment.tbl, comment.schemaName, comment.constraintName, comment.constraintType, comment.comment); err != nil { + return err + } + } else { + if err := commentUpdater.DeleteConstraintComment( + comment.tbl, comment.schemaName, 
comment.constraintName, comment.constraintType); err != nil { return err } } } - for _, id := range mvs.descriptorsToDelete.Ordered() { if err := b.DeleteDescriptor(ctx, id); err != nil { return err @@ -148,7 +175,7 @@ func executeDescriptorMutationOps(ctx context.Context, deps Dependencies, ops [] md jobs.JobMetadata, updateProgress func(*jobspb.Progress), setNonCancelable func(), ) error { progress := *md.Progress - progress.GetNewSchemaChange().States = update.progress + progress.GetNewSchemaChange().Current = update.current updateProgress(&progress) if !md.Payload.Noncancelable && update.isNonCancelable { setNonCancelable() @@ -172,14 +199,14 @@ func eventLogEntriesForStatement(statementEvents []eventPayload) (logEntries []e // be the source and everything else before will be dependencies if they have // the same subtask ID. for _, event := range statementEvents { - dependentEvents[event.metadata.SubWorkID] = append(dependentEvents[event.metadata.SubWorkID], event) + dependentEvents[event.SubWorkID] = append(dependentEvents[event.SubWorkID], event) } // Split of the source events. orderedSubWorkID := make([]uint32, 0, len(dependentEvents)) for subWorkID := range dependentEvents { elems := dependentEvents[subWorkID] sort.SliceStable(elems, func(i, j int) bool { - return elems[i].metadata.SourceElementID < elems[j].metadata.SourceElementID + return elems[i].SourceElementID < elems[j].SourceElementID }) sourceEvents[subWorkID] = elems[0] dependentEvents[subWorkID] = elems[1:] @@ -237,31 +264,50 @@ func eventLogEntriesForStatement(statementEvents []eventPayload) (logEntries []e } type mutationVisitorState struct { - c Catalog - checkedOutDescriptors nstree.Map - drainedNames map[descpb.ID][]descpb.NameInfo - descriptorsToDelete catalog.DescriptorIDSet - dbGCJobs catalog.DescriptorIDSet - descriptorGCJobs map[descpb.ID][]jobspb.SchemaChangeGCDetails_DroppedID - indexGCJobs map[descpb.ID][]jobspb.SchemaChangeGCDetails_DroppedIndex - schemaChangerJob *jobs.Record - schemaChangerJobUpdates map[jobspb.JobID]schemaChangerJobUpdate - eventsByStatement map[uint32][]eventPayload + c Catalog + checkedOutDescriptors nstree.Map + drainedNames map[descpb.ID][]descpb.NameInfo + descriptorsToDelete catalog.DescriptorIDSet + commentsToUpdate []commentToUpdate + constraintCommentsToUpdate []constraintCommentToUpdate + dbGCJobs catalog.DescriptorIDSet + descriptorGCJobs map[descpb.ID][]jobspb.SchemaChangeGCDetails_DroppedID + indexGCJobs map[descpb.ID][]jobspb.SchemaChangeGCDetails_DroppedIndex + schemaChangerJob *jobs.Record + schemaChangerJobUpdates map[jobspb.JobID]schemaChangerJobUpdate + eventsByStatement map[uint32][]eventPayload +} + +type constraintCommentToUpdate struct { + tbl catalog.TableDescriptor + schemaName string + constraintName string + constraintType scpb.ConstraintType + comment string +} + +type commentToUpdate struct { + id int64 + subID int64 + commentType keys.CommentType + comment string } type eventPayload struct { - id descpb.ID - metadata *scpb.ElementMetadata - event eventpb.EventPayload + id descpb.ID + scpb.TargetMetadata + + details eventpb.CommonSQLEventDetails + event eventpb.EventPayload } type schemaChangerJobUpdate struct { - progress []scpb.Status + current []scpb.Status isNonCancelable bool } func (mvs *mutationVisitorState) UpdateSchemaChangerJob( - jobID jobspb.JobID, statuses []scpb.Status, isNonCancelable bool, + jobID jobspb.JobID, current []scpb.Status, isNonCancelable bool, ) error { if mvs.schemaChangerJobUpdates == nil { mvs.schemaChangerJobUpdates = 
make(map[jobspb.JobID]schemaChangerJobUpdate) @@ -269,7 +315,7 @@ func (mvs *mutationVisitorState) UpdateSchemaChangerJob( return errors.AssertionFailedf("cannot update job %d more than once", jobID) } mvs.schemaChangerJobUpdates[jobID] = schemaChangerJobUpdate{ - progress: statuses, + current: current, isNonCancelable: isNonCancelable, } return nil @@ -307,6 +353,37 @@ func (mvs *mutationVisitorState) DeleteDescriptor(id descpb.ID) { mvs.descriptorsToDelete.Add(id) } +func (mvs *mutationVisitorState) DeleteComment( + id descpb.ID, subID int, commentType keys.CommentType, +) { + mvs.commentsToUpdate = append(mvs.commentsToUpdate, + commentToUpdate{ + id: int64(id), + subID: int64(subID), + commentType: commentType, + }) +} + +func (mvs *mutationVisitorState) DeleteConstraintComment( + ctx context.Context, + tbl catalog.TableDescriptor, + constraintName string, + constraintType scpb.ConstraintType, +) error { + schema, err := mvs.c.MustReadImmutableDescriptor(ctx, tbl.GetParentSchemaID()) + if err != nil { + return err + } + mvs.constraintCommentsToUpdate = append(mvs.constraintCommentsToUpdate, + constraintCommentToUpdate{ + tbl: tbl, + schemaName: schema.GetName(), + constraintName: constraintName, + constraintType: constraintType, + }) + return nil +} + func (mvs *mutationVisitorState) AddDrainedName(id descpb.ID, nameInfo descpb.NameInfo) { if _, ok := mvs.drainedNames[id]; !ok { mvs.drainedNames[id] = []descpb.NameInfo{nameInfo} @@ -339,39 +416,29 @@ func (mvs *mutationVisitorState) AddNewGCJobForIndex( } func (mvs *mutationVisitorState) AddNewSchemaChangerJob( - jobID jobspb.JobID, state scpb.State, + jobID jobspb.JobID, targetState scpb.TargetState, current []scpb.Status, ) error { if mvs.schemaChangerJob != nil { return errors.AssertionFailedf("cannot create more than one new schema change job") } - targets := make([]*scpb.Target, len(state.Nodes)) - nodeStatuses := make([]scpb.Status, len(state.Nodes)) - // TODO(ajwerner): It may be better in the future to have the builder be - // responsible for determining this set of descriptors. As of the time of - // writing, the descriptors to be "locked," descriptors that need schema - // change jobs, and descriptors with schema change mutations all coincide. But - // there are future schema changes to be implemented in the new schema changer - // (e.g., RENAME TABLE) for which this may no longer be true. - for i, n := range state.Nodes { - targets[i] = n.Target - nodeStatuses[i] = n.Status - } - stmts := make([]string, len(state.Statements)) - for i, stmt := range state.Statements { + stmts := make([]string, len(targetState.Statements)) + for i, stmt := range targetState.Statements { stmts[i] = stmt.Statement } mvs.schemaChangerJob = &jobs.Record{ - JobID: jobID, - Description: "schema change job", // TODO(ajwerner): use const - Statements: stmts, - Username: security.MakeSQLUsernameFromPreNormalizedString(state.Authorization.Username), - DescriptorIDs: screl.GetDescIDs(state), - Details: jobspb.NewSchemaChangeDetails{Targets: targets}, - Progress: jobspb.NewSchemaChangeProgress{ - States: nodeStatuses, - Authorization: &state.Authorization, - Statements: state.Statements, - }, + JobID: jobID, + Description: "schema change job", // TODO(ajwerner): use const + Statements: stmts, + Username: security.MakeSQLUsernameFromPreNormalizedString(targetState.Authorization.UserName), + // TODO(ajwerner): It may be better in the future to have the builder be + // responsible for determining this set of descriptors. 
As of the time of + // writing, the descriptors to be "locked," descriptors that need schema + // change jobs, and descriptors with schema change mutations all coincide. + // But there are future schema changes to be implemented in the new schema + // changer (e.g., RENAME TABLE) for which this may no longer be true. + DescriptorIDs: screl.GetDescIDs(targetState), + Details: jobspb.NewSchemaChangeDetails{TargetState: targetState}, + Progress: jobspb.NewSchemaChangeProgress{Current: current}, RunningStatus: "", NonCancelable: false, } @@ -407,14 +474,18 @@ func createGCJobRecord( // EnqueueEvent implements the scmutationexec.MutationVisitorStateUpdater // interface. func (mvs *mutationVisitorState) EnqueueEvent( - id descpb.ID, metadata *scpb.ElementMetadata, event eventpb.EventPayload, + id descpb.ID, + metadata scpb.TargetMetadata, + details eventpb.CommonSQLEventDetails, + event eventpb.EventPayload, ) error { mvs.eventsByStatement[metadata.StatementID] = append( mvs.eventsByStatement[metadata.StatementID], eventPayload{ - id: id, - event: event, - metadata: metadata, + id: id, + event: event, + TargetMetadata: metadata, + details: details, }, ) return nil diff --git a/pkg/sql/schemachanger/scexec/executor_external_test.go b/pkg/sql/schemachanger/scexec/executor_external_test.go index dacf3ea461bc..57275e8d673f 100644 --- a/pkg/sql/schemachanger/scexec/executor_external_test.go +++ b/pkg/sql/schemachanger/scexec/executor_external_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -32,7 +33,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps/sctestutils" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -61,6 +61,7 @@ func (ti testInfra) newExecDeps( ) scexec.Dependencies { return scdeps.NewExecutorDependencies( ti.lm.Codec(), + &sessiondata.SessionData{}, /* sessionData */ txn, security.RootUserName(), descsCollection, @@ -68,11 +69,12 @@ func (ti testInfra) newExecDeps( noopBackfiller{}, /* backfiller */ scdeps.NewNoOpBackfillTracker(ti.lm.Codec()), scdeps.NewNoopPeriodicProgressFlusher(), - noopIndexValidator{}, /* indexValidator */ - noopPartitioner{}, /* partitioner */ - noopEventLogger{}, /* eventLogger */ - 1, /* schemaChangerJobID */ - nil, /* statements */ + noopIndexValidator{}, /* indexValidator */ + noopPartitioner{}, /* partitioner */ + noopCommentUpdaterFactory{}, /* commentUpdaterFactory*/ + noopEventLogger{}, /* eventLogger */ + 1, /* schemaChangerJobID */ + nil, /* statements */ ) } @@ -251,8 +253,7 @@ func TestSchemaChanger(t *testing.T) { ti.tsql.Exec(t, `CREATE DATABASE db`) ti.tsql.Exec(t, `CREATE TABLE db.foo (i INT PRIMARY KEY)`) - var ts scpb.State - var targetSlice []*scpb.Target + var cs scpb.CurrentState require.NoError(t, ti.txn(ctx, func( ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ) (err error) { @@ -260,16 +261,18 @@ func TestSchemaChanger(t *testing.T) { _, 
fooTable, err := descriptors.GetImmutableTableByName(ctx, txn, &tn, tree.ObjectLookupFlagsWithRequired()) require.NoError(t, err) - // Corresponds to: - // - // ALTER TABLE foo ADD COLUMN j INT; - // + stmts := []scpb.Statement{ + { + Statement: "ALTER TABLE foo ADD COLUMN j INT", + }, + } metadata := &scpb.TargetMetadata{ StatementID: 0, SubWorkID: 1, - SourceElementID: 1} - targetSlice = []*scpb.Target{ - scpb.NewTarget( + SourceElementID: 1, + } + targets := []scpb.Target{ + scpb.MakeTarget( scpb.Status_PUBLIC, &scpb.PrimaryIndex{ TableID: fooTable.GetID(), @@ -280,24 +283,27 @@ func TestSchemaChanger(t *testing.T) { Unique: true, Inverted: false, }, - metadata), - scpb.NewTarget( + metadata, + ), + scpb.MakeTarget( scpb.Status_PUBLIC, &scpb.IndexName{ TableID: fooTable.GetID(), IndexID: 2, Name: "new_primary_key", }, - metadata), - scpb.NewTarget( + metadata, + ), + scpb.MakeTarget( scpb.Status_PUBLIC, &scpb.ColumnName{ TableID: fooTable.GetID(), ColumnID: 2, Name: "j", }, - metadata), - scpb.NewTarget( + metadata, + ), + scpb.MakeTarget( scpb.Status_PUBLIC, &scpb.Column{ TableID: fooTable.GetID(), @@ -306,8 +312,9 @@ func TestSchemaChanger(t *testing.T) { Nullable: true, PgAttributeNum: 2, }, - metadata), - scpb.NewTarget( + metadata, + ), + scpb.MakeTarget( scpb.Status_ABSENT, &scpb.PrimaryIndex{ TableID: fooTable.GetID(), @@ -317,107 +324,65 @@ func TestSchemaChanger(t *testing.T) { Unique: true, Inverted: false, }, - metadata), - scpb.NewTarget( + metadata, + ), + scpb.MakeTarget( scpb.Status_ABSENT, &scpb.IndexName{ TableID: fooTable.GetID(), IndexID: 1, Name: "primary", }, - metadata), + metadata, + ), } - - nodes := scpb.State{ - Nodes: []*scpb.Node{ - { - Target: targetSlice[0], - Status: scpb.Status_ABSENT, - }, - { - Target: targetSlice[1], - Status: scpb.Status_ABSENT, - }, - { - Target: targetSlice[2], - Status: scpb.Status_ABSENT, - }, - { - Target: targetSlice[3], - Status: scpb.Status_ABSENT, - }, - { - Target: targetSlice[4], - Status: scpb.Status_PUBLIC, - }, - { - Target: targetSlice[5], - Status: scpb.Status_PUBLIC, - }, - }, - Statements: []*scpb.Statement{ - {}, - }, + current := []scpb.Status{ + scpb.Status_ABSENT, + scpb.Status_ABSENT, + scpb.Status_ABSENT, + scpb.Status_ABSENT, + scpb.Status_PUBLIC, + scpb.Status_PUBLIC, + } + initial := scpb.CurrentState{ + TargetState: scpb.TargetState{Statements: stmts, Targets: targets}, + Current: current, } for _, phase := range []scop.Phase{ scop.StatementPhase, scop.PreCommitPhase, } { - sc := sctestutils.MakePlan(t, nodes, phase) + sc := sctestutils.MakePlan(t, initial, phase) stages := sc.StagesForCurrentPhase() for _, s := range stages { exDeps := ti.newExecDeps(txn, descriptors) - require.NoError(t, scgraphviz.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()), sc)) - ts = s.After + require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) + cs = scpb.CurrentState{TargetState: initial.TargetState, Current: s.After} } } return nil })) - var after scpb.State + var after scpb.CurrentState require.NoError(t, ti.txn(ctx, func( ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ) error { - sc := sctestutils.MakePlan(t, ts, scop.PostCommitPhase) + sc := sctestutils.MakePlan(t, cs, scop.PostCommitPhase) for _, s := range sc.Stages { exDeps := ti.newExecDeps(txn, descriptors) - require.NoError(t, scgraphviz.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()), sc)) - after = s.After + require.NoError(t, 
sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) + after = scpb.CurrentState{TargetState: cs.TargetState, Current: s.After} } return nil })) - require.Equal(t, scpb.State{ - Nodes: []*scpb.Node{ - { - Target: targetSlice[0], - Status: scpb.Status_PUBLIC, - }, - { - Target: targetSlice[1], - Status: scpb.Status_PUBLIC, - }, - { - Target: targetSlice[2], - Status: scpb.Status_PUBLIC, - }, - { - Target: targetSlice[3], - Status: scpb.Status_PUBLIC, - }, - { - Target: targetSlice[4], - Status: scpb.Status_ABSENT, - }, - { - Target: targetSlice[5], - Status: scpb.Status_ABSENT, - }, - }, - - Statements: []*scpb.Statement{ - {}, - }, - }, after) + require.Equal(t, []scpb.Status{ + scpb.Status_PUBLIC, + scpb.Status_PUBLIC, + scpb.Status_PUBLIC, + scpb.Status_PUBLIC, + scpb.Status_ABSENT, + scpb.Status_ABSENT, + }, after.Current) ti.tsql.Exec(t, "INSERT INTO db.foo VALUES (1, 1)") }) t.Run("with builder", func(t *testing.T) { @@ -426,7 +391,7 @@ func TestSchemaChanger(t *testing.T) { ti.tsql.Exec(t, `CREATE DATABASE db`) ti.tsql.Exec(t, `CREATE TABLE db.foo (i INT PRIMARY KEY)`) - var ts scpb.State + var cs scpb.CurrentState require.NoError(t, ti.txn(ctx, func( ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ) (err error) { @@ -434,18 +399,18 @@ func TestSchemaChanger(t *testing.T) { parsed, err := parser.Parse("ALTER TABLE db.foo ADD COLUMN j INT") require.NoError(t, err) require.Len(t, parsed, 1) - outputNodes, err := scbuild.Build(ctx, buildDeps, scpb.State{}, parsed[0].AST.(*tree.AlterTable)) + initial, err := scbuild.Build(ctx, buildDeps, scpb.CurrentState{}, parsed[0].AST.(*tree.AlterTable)) require.NoError(t, err) for _, phase := range []scop.Phase{ scop.StatementPhase, scop.PreCommitPhase, } { - sc := sctestutils.MakePlan(t, outputNodes, phase) + sc := sctestutils.MakePlan(t, initial, phase) for _, s := range sc.StagesForCurrentPhase() { exDeps := ti.newExecDeps(txn, descriptors) - require.NoError(t, scgraphviz.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()), sc)) - ts = s.After + require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) + cs = scpb.CurrentState{TargetState: initial.TargetState, Current: s.After} } } }) @@ -454,10 +419,10 @@ func TestSchemaChanger(t *testing.T) { require.NoError(t, ti.txn(ctx, func( ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ) error { - sc := sctestutils.MakePlan(t, ts, scop.PostCommitPhase) + sc := sctestutils.MakePlan(t, cs, scop.PostCommitPhase) for _, s := range sc.Stages { exDeps := ti.newExecDeps(txn, descriptors) - require.NoError(t, scgraphviz.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()), sc)) + require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) } return nil })) @@ -542,7 +507,53 @@ func (noopPartitioner) AddPartitioning( type noopEventLogger struct{} func (noopEventLogger) LogEvent( - ctx context.Context, descID descpb.ID, metadata scpb.ElementMetadata, event eventpb.EventPayload, + _ context.Context, _ descpb.ID, _ eventpb.CommonSQLEventDetails, _ eventpb.EventPayload, +) error { + return nil +} + +type noopCommentUpdaterFactory struct { +} + +type noopCommentUpdater struct { +} + +func (noopCommentUpdaterFactory) NewCommentUpdater( + ctx context.Context, txn *kv.Txn, sessionData *sessiondata.SessionData, +) scexec.CommentUpdater { + return &noopCommentUpdater{} +} + +func (noopCommentUpdater) UpsertDescriptorComment( + id int64, subID int64, commentType 
keys.CommentType, comment string, +) error { + return nil +} + +// DeleteDescriptorComment deletes a comment for a given descriptor. +func (noopCommentUpdater) DeleteDescriptorComment( + id int64, subID int64, commentType keys.CommentType, +) error { + return nil +} + +//UpsertConstraintComment upsersts a comment associated with a constraint. +func (noopCommentUpdater) UpsertConstraintComment( + desc catalog.TableDescriptor, + schemaName string, + constraintName string, + constraintType scpb.ConstraintType, + comment string, +) error { + return nil +} + +//DeleteConstraintComment deletes a comment associated with a constraint. +func (noopCommentUpdater) DeleteConstraintComment( + desc catalog.TableDescriptor, + schemaName string, + constraintName string, + constraintType scpb.ConstraintType, ) error { return nil } @@ -551,3 +562,4 @@ var _ scexec.Backfiller = noopBackfiller{} var _ scexec.IndexValidator = noopIndexValidator{} var _ scmutationexec.Partitioner = noopPartitioner{} var _ scexec.EventLogger = noopEventLogger{} +var _ scexec.CommentUpdater = noopCommentUpdater{} diff --git a/pkg/sql/schemachanger/scexec/mocks_generated_test.go b/pkg/sql/schemachanger/scexec/mocks_generated_test.go index 91d1a67d6c11..d6d3f2b44d94 100644 --- a/pkg/sql/schemachanger/scexec/mocks_generated_test.go +++ b/pkg/sql/schemachanger/scexec/mocks_generated_test.go @@ -10,9 +10,9 @@ import ( security "github.com/cockroachdb/cockroach/pkg/security" catalog "github.com/cockroachdb/cockroach/pkg/sql/catalog" - descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" scexec "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" scmutationexec "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec" + catid "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" gomock "github.com/golang/mock/gomock" ) @@ -52,7 +52,7 @@ func (mr *MockCatalogMockRecorder) AddSyntheticDescriptor(arg0 interface{}) *gom } // GetFullyQualifiedName mocks base method. -func (m *MockCatalog) GetFullyQualifiedName(arg0 context.Context, arg1 descpb.ID) (string, error) { +func (m *MockCatalog) GetFullyQualifiedName(arg0 context.Context, arg1 catid.DescID) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetFullyQualifiedName", arg0, arg1) ret0, _ := ret[0].(string) @@ -67,7 +67,7 @@ func (mr *MockCatalogMockRecorder) GetFullyQualifiedName(arg0, arg1 interface{}) } // MustReadImmutableDescriptor mocks base method. -func (m *MockCatalog) MustReadImmutableDescriptor(arg0 context.Context, arg1 descpb.ID) (catalog.Descriptor, error) { +func (m *MockCatalog) MustReadImmutableDescriptor(arg0 context.Context, arg1 catid.DescID) (catalog.Descriptor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MustReadImmutableDescriptor", arg0, arg1) ret0, _ := ret[0].(catalog.Descriptor) @@ -82,7 +82,7 @@ func (mr *MockCatalogMockRecorder) MustReadImmutableDescriptor(arg0, arg1 interf } // MustReadMutableDescriptor mocks base method. -func (m *MockCatalog) MustReadMutableDescriptor(arg0 context.Context, arg1 descpb.ID) (catalog.MutableDescriptor, error) { +func (m *MockCatalog) MustReadMutableDescriptor(arg0 context.Context, arg1 catid.DescID) (catalog.MutableDescriptor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MustReadMutableDescriptor", arg0, arg1) ret0, _ := ret[0].(catalog.MutableDescriptor) @@ -111,7 +111,7 @@ func (mr *MockCatalogMockRecorder) NewCatalogChangeBatcher() *gomock.Call { } // RemoveSyntheticDescriptor mocks base method. 
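The `var _ scexec.CommentUpdater = noopCommentUpdater{}` assertion added above is the usual Go trick for keeping a test double in sync with its interface: if the interface gains or changes a method, the package stops compiling rather than failing at runtime. A tiny self-contained illustration of the same idiom, with a hypothetical `greeter` interface standing in for scexec.CommentUpdater:

```
package main

import "fmt"

// greeter is a hypothetical interface standing in for scexec.CommentUpdater.
type greeter interface {
	Greet(name string) string
}

// noopGreeter plays the role of noopCommentUpdater: a do-nothing
// implementation for tests that don't care about greetings.
type noopGreeter struct{}

func (noopGreeter) Greet(name string) string { return "" }

// The blank-identifier assignment is checked at compile time; if greeter
// grows a method that noopGreeter lacks, this line fails to compile.
var _ greeter = noopGreeter{}

func main() {
	fmt.Printf("%q\n", noopGreeter{}.Greet("schema changer"))
}
```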
-func (m *MockCatalog) RemoveSyntheticDescriptor(arg0 descpb.ID) { +func (m *MockCatalog) RemoveSyntheticDescriptor(arg0 catid.DescID) { m.ctrl.T.Helper() m.ctrl.Call(m, "RemoveSyntheticDescriptor", arg0) } @@ -173,6 +173,20 @@ func (mr *MockDependenciesMockRecorder) Catalog() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Catalog", reflect.TypeOf((*MockDependencies)(nil).Catalog)) } +// CommentUpdater mocks base method. +func (m *MockDependencies) CommentUpdater(arg0 context.Context) scexec.CommentUpdater { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommentUpdater", arg0) + ret0, _ := ret[0].(scexec.CommentUpdater) + return ret0 +} + +// CommentUpdater indicates an expected call of CommentUpdater. +func (mr *MockDependenciesMockRecorder) CommentUpdater(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommentUpdater", reflect.TypeOf((*MockDependencies)(nil).CommentUpdater), arg0) +} + // EventLogger mocks base method. func (m *MockDependencies) EventLogger() scexec.EventLogger { m.ctrl.T.Helper() diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/BUILD.bazel b/pkg/sql/schemachanger/scexec/scmutationexec/BUILD.bazel index 2046d25c3bb3..bea91d2b1480 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/scmutationexec/BUILD.bazel @@ -10,6 +10,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/jobs/jobspb", + "//pkg/keys", "//pkg/sql/catalog", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", @@ -25,5 +26,6 @@ go_library( "//pkg/util/log/eventpb", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", + "@com_github_cockroachdb_redact//:redact", ], ) diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/scmutationexec.go b/pkg/sql/schemachanger/scexec/scmutationexec/scmutationexec.go index 1877a2480620..caf7bc22a8ed 100644 --- a/pkg/sql/schemachanger/scexec/scmutationexec/scmutationexec.go +++ b/pkg/sql/schemachanger/scexec/scmutationexec/scmutationexec.go @@ -15,6 +15,7 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -30,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" + "github.com/cockroachdb/redact" ) // CatalogReader describes catalog read operations as required by the mutation @@ -77,6 +79,17 @@ type MutationVisitorStateUpdater interface { // DeleteDescriptor adds a descriptor for deletion. DeleteDescriptor(id descpb.ID) + // DeleteComment removes comments for a descriptor + DeleteComment(id descpb.ID, subID int, commentType keys.CommentType) + + // DeleteConstraintComment removes comments for a descriptor + DeleteConstraintComment( + ctx context.Context, + tbl catalog.TableDescriptor, + constraintName string, + constraintType scpb.ConstraintType, + ) error + // AddNewGCJobForTable enqueues a GC job for the given table. AddNewGCJobForTable(descriptor catalog.TableDescriptor) @@ -87,14 +100,14 @@ type MutationVisitorStateUpdater interface { AddNewGCJobForIndex(tbl catalog.TableDescriptor, index catalog.Index) // AddNewSchemaChangerJob adds a schema changer job. 
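The DeleteComment and DeleteConstraintComment hooks added to MutationVisitorStateUpdater above follow the same accumulate-then-flush pattern as the rest of mutationVisitorState: visitors only queue commentToUpdate entries, and executeDescriptorMutationOps later walks the queue and calls the CommentUpdater, upserting when there is comment text and deleting when the text is empty. A simplified, self-contained sketch of that pattern, with stand-in types (the real code keys comments by descriptor ID, sub-ID and keys.CommentType):

```
package main

import "fmt"

// pendingComment is a stand-in for scexec's commentToUpdate.
type pendingComment struct {
	id, subID int64
	comment   string // empty string means "delete the comment"
}

// commentUpdater is a stand-in for the scexec.CommentUpdater dependency.
type commentUpdater interface {
	Upsert(id, subID int64, comment string) error
	Delete(id, subID int64) error
}

// visitorState only records what should happen; nothing is written out
// until the flush step at the end of the stage.
type visitorState struct {
	commentsToUpdate []pendingComment
}

func (s *visitorState) DeleteComment(id, subID int64) {
	s.commentsToUpdate = append(s.commentsToUpdate, pendingComment{id: id, subID: subID})
}

// flush mirrors the loop in executeDescriptorMutationOps: empty comments
// turn into deletes, non-empty ones into upserts.
func (s *visitorState) flush(cu commentUpdater) error {
	for _, c := range s.commentsToUpdate {
		if len(c.comment) > 0 {
			if err := cu.Upsert(c.id, c.subID, c.comment); err != nil {
				return err
			}
		} else if err := cu.Delete(c.id, c.subID); err != nil {
			return err
		}
	}
	return nil
}

// loggingUpdater just prints what it would do.
type loggingUpdater struct{}

func (loggingUpdater) Upsert(id, subID int64, comment string) error {
	fmt.Printf("upsert comment on (%d,%d): %q\n", id, subID, comment)
	return nil
}
func (loggingUpdater) Delete(id, subID int64) error {
	fmt.Printf("delete comment on (%d,%d)\n", id, subID)
	return nil
}

func main() {
	var s visitorState
	s.DeleteComment(104, 0) // e.g. a RemoveTableComment op on table 104
	s.commentsToUpdate = append(s.commentsToUpdate, pendingComment{id: 104, subID: 2, comment: "pk column"})
	if err := s.flush(loggingUpdater{}); err != nil {
		panic(err)
	}
}
```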
- AddNewSchemaChangerJob(jobID jobspb.JobID, state scpb.State) error + AddNewSchemaChangerJob(jobID jobspb.JobID, targetState scpb.TargetState, current []scpb.Status) error // UpdateSchemaChangerJob will update the progress and payload of the // schema changer job. - UpdateSchemaChangerJob(jobID jobspb.JobID, statuses []scpb.Status, isNonCancelable bool) error + UpdateSchemaChangerJob(jobID jobspb.JobID, current []scpb.Status, isNonCancelable bool) error // EnqueueEvent will enqueue an event to be written to the event log. - EnqueueEvent(id descpb.ID, metadata *scpb.ElementMetadata, event eventpb.EventPayload) error + EnqueueEvent(id descpb.ID, metadata scpb.TargetMetadata, details eventpb.CommonSQLEventDetails, event eventpb.EventPayload) error } // NewMutationVisitor creates a new scop.MutationVisitor. @@ -169,13 +182,13 @@ func (m *visitor) swapSchemaChangeJobID( func (m *visitor) CreateDeclarativeSchemaChangerJob( ctx context.Context, job scop.CreateDeclarativeSchemaChangerJob, ) error { - return m.s.AddNewSchemaChangerJob(job.JobID, job.State) + return m.s.AddNewSchemaChangerJob(job.JobID, job.TargetState, job.Current) } func (m *visitor) UpdateSchemaChangerJob( ctx context.Context, progress scop.UpdateSchemaChangerJob, ) error { - return m.s.UpdateSchemaChangerJob(progress.JobID, progress.Statuses, progress.IsNonCancelable) + return m.s.UpdateSchemaChangerJob(progress.JobID, progress.Current, progress.IsNonCancelable) } func (m *visitor) checkOutTable(ctx context.Context, id descpb.ID) (*tabledesc.Mutable, error) { @@ -889,23 +902,29 @@ func (m *visitor) DropForeignKeyRef(ctx context.Context, op scop.DropForeignKeyR } func (m *visitor) LogEvent(ctx context.Context, op scop.LogEvent) error { - event, err := asEventPayload(ctx, op, m) + descID := screl.GetDescID(op.Element.Element()) + fullName, err := m.cr.GetFullyQualifiedName(ctx, descID) + if err != nil { + return err + } + event, err := asEventPayload(ctx, fullName, op.Element.Element(), op.TargetStatus, m) if err != nil { return err } - return m.s.EnqueueEvent(op.DescID, &op.Metadata, event) + details := eventpb.CommonSQLEventDetails{ + ApplicationName: op.Authorization.AppName, + User: op.Authorization.UserName, + Statement: redact.RedactableString(op.Statement), + Tag: op.StatementTag, + } + return m.s.EnqueueEvent(descID, op.TargetMetadata, details, event) } func asEventPayload( - ctx context.Context, op scop.LogEvent, m *visitor, + ctx context.Context, fullName string, e scpb.Element, targetStatus scpb.Status, m *visitor, ) (eventpb.EventPayload, error) { - descID := screl.GetDescID(op.Element.Element()) - fullName, err := m.cr.GetFullyQualifiedName(ctx, descID) - if err != nil { - return nil, err - } - if op.TargetStatus == scpb.Status_ABSENT { - switch op.Element.GetValue().(type) { + if targetStatus == scpb.Status_ABSENT { + switch e.(type) { case *scpb.Table: return &eventpb.DropTable{TableName: fullName}, nil case *scpb.View: @@ -920,9 +939,9 @@ func asEventPayload( return &eventpb.DropType{TypeName: fullName}, nil } } - switch e := op.Element.GetValue().(type) { + switch e := e.(type) { case *scpb.Column: - tbl, err := m.checkOutTable(ctx, op.DescID) + tbl, err := m.checkOutTable(ctx, e.TableID) if err != nil { return nil, err } @@ -935,7 +954,7 @@ func asEventPayload( MutationID: uint32(mutation.MutationID()), }, nil case *scpb.SecondaryIndex: - tbl, err := m.checkOutTable(ctx, op.DescID) + tbl, err := m.checkOutTable(ctx, e.TableID) if err != nil { return nil, err } @@ -943,7 +962,7 @@ func asEventPayload( if err != 
nil { return nil, err } - switch op.TargetStatus { + switch targetStatus { case scpb.Status_PUBLIC: return &eventpb.AlterTable{ TableName: fullName, @@ -956,10 +975,10 @@ func asEventPayload( MutationID: uint32(mutation.MutationID()), }, nil default: - return nil, errors.AssertionFailedf("unknown target status %s", op.TargetStatus) + return nil, errors.AssertionFailedf("unknown target status %s", targetStatus) } } - return nil, errors.AssertionFailedf("unknown %s element type %T", op.TargetStatus.String(), op.Element.GetValue()) + return nil, errors.AssertionFailedf("unknown %s element type %T", targetStatus.String(), e) } func (m *visitor) AddIndexPartitionInfo(ctx context.Context, op scop.AddIndexPartitionInfo) error { @@ -1016,4 +1035,39 @@ func (m *visitor) DeleteDatabaseSchemaEntry( return nil } +func (m *visitor) RemoveTableComment(_ context.Context, op scop.RemoveTableComment) error { + m.s.DeleteComment(op.TableID, 0, keys.TableCommentType) + return nil +} + +func (m *visitor) RemoveDatabaseComment(_ context.Context, op scop.RemoveDatabaseComment) error { + m.s.DeleteComment(op.DatabaseID, 0, keys.DatabaseCommentType) + return nil +} + +func (m *visitor) RemoveSchemaComment(_ context.Context, op scop.RemoveSchemaComment) error { + m.s.DeleteComment(op.SchemaID, 0, keys.SchemaCommentType) + return nil +} + +func (m *visitor) RemoveIndexComment(_ context.Context, op scop.RemoveIndexComment) error { + m.s.DeleteComment(op.TableID, int(op.IndexID), keys.IndexCommentType) + return nil +} + +func (m *visitor) RemoveColumnComment(_ context.Context, op scop.RemoveColumnComment) error { + m.s.DeleteComment(op.TableID, int(op.ColumnID), keys.ColumnCommentType) + return nil +} + +func (m *visitor) RemoveConstraintComment( + ctx context.Context, op scop.RemoveConstraintComment, +) error { + tbl, err := m.cr.MustReadImmutableDescriptor(ctx, op.TableID) + if err != nil { + return err + } + return m.s.DeleteConstraintComment(ctx, tbl.(catalog.TableDescriptor), op.ConstraintName, op.ConstraintType) +} + var _ scop.MutationVisitor = (*visitor)(nil) diff --git a/pkg/sql/schemachanger/schemachanger_test.go b/pkg/sql/schemachanger/schemachanger_test.go index b4609a7f54d6..5b5c3450b679 100644 --- a/pkg/sql/schemachanger/schemachanger_test.go +++ b/pkg/sql/schemachanger/schemachanger_test.go @@ -222,7 +222,7 @@ func TestSchemaChangeWaitsForOtherSchemaChanges(t *testing.T) { assert.Truef(t, highestID <= 1, "unexpected mutation IDs %v", idsSeen) // Block job 1 during the backfill. 
s := p.Stages[idx] - stmt := p.Initial.Statements[0].Statement + stmt := p.TargetState.Statements[0].Statement if stmt != stmt1 || s.Type() != scop.BackfillType { return nil } diff --git a/pkg/sql/schemachanger/scjob/job.go b/pkg/sql/schemachanger/scjob/job.go index 731845dac73f..990eff783280 100644 --- a/pkg/sql/schemachanger/scjob/job.go +++ b/pkg/sql/schemachanger/scjob/job.go @@ -79,14 +79,20 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) execCfg.Codec, execCfg.Settings, execCfg.IndexValidator, + execCfg.CommentUpdaterFactory, execCfg.DeclarativeSchemaChangerTestingKnobs, payload.Statement, + execCtx.SessionData(), ) return scrun.RunSchemaChangesInJob( - ctx, execCfg.DeclarativeSchemaChangerTestingKnobs, execCfg.Settings, - deps, n.job.ID(), payload.DescriptorIDs, - *newSchemaChangeDetails, *newSchemaChangeProgress, + ctx, + execCfg.DeclarativeSchemaChangerTestingKnobs, + execCfg.Settings, + deps, + n.job.ID(), + *newSchemaChangeDetails, + *newSchemaChangeProgress, n.rollback, ) } diff --git a/pkg/sql/schemachanger/scop/BUILD.bazel b/pkg/sql/schemachanger/scop/BUILD.bazel index b1ec90b60e41..016c71054264 100644 --- a/pkg/sql/schemachanger/scop/BUILD.bazel +++ b/pkg/sql/schemachanger/scop/BUILD.bazel @@ -19,6 +19,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/jobs/jobspb", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scpb", "//pkg/sql/types", diff --git a/pkg/sql/schemachanger/scop/mutation.go b/pkg/sql/schemachanger/scop/mutation.go index 540de8821f41..2f38f6545695 100644 --- a/pkg/sql/schemachanger/scop/mutation.go +++ b/pkg/sql/schemachanger/scop/mutation.go @@ -12,6 +12,7 @@ package scop import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -49,7 +50,7 @@ type MakeAddedIndexDeleteOnly struct { KeySuffixColumnIDs []descpb.ColumnID StoreColumnIDs []descpb.ColumnID CompositeColumnIDs []descpb.ColumnID - ShardedDescriptor *descpb.ShardedDescriptor + ShardedDescriptor *catpb.ShardedDescriptor Inverted bool Concurrently bool SecondaryIndex bool @@ -204,12 +205,12 @@ type MakeAddedColumnDeleteOnly struct { OnUpdateExpr string Hidden bool Inaccessible bool - GeneratedAsIdentityType descpb.GeneratedAsIdentityType + GeneratedAsIdentityType catpb.GeneratedAsIdentityType GeneratedAsIdentitySequenceOption string UsesSequenceIds []descpb.ID ComputerExpr string PgAttributeNum uint32 - SystemColumnKind descpb.SystemColumnKind + SystemColumnKind catpb.SystemColumnKind Virtual bool } @@ -295,10 +296,12 @@ type AddIndexPartitionInfo struct { // LogEvent logs an event for a given descriptor. type LogEvent struct { mutationOp - DescID descpb.ID - Metadata scpb.ElementMetadata - Element *scpb.ElementProto - TargetStatus scpb.Status + TargetMetadata scpb.TargetMetadata + Authorization scpb.Authorization + Statement string + StatementTag string + Element scpb.ElementProto + TargetStatus scpb.Status } // SetColumnName makes a column only to allocate @@ -350,8 +353,9 @@ type AddJobReference struct { // declarative schema changer post-commit phases. 
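Throughout this patch the old scpb.State, a slice of Nodes each bundling a Target with its Status, is split into an immutable scpb.TargetState plus a parallel []scpb.Status slice, and CreateDeclarativeSchemaChangerJob below now carries the two pieces separately. A rough, self-contained sketch of that shape, with stand-in types in place of the real protobuf-generated ones:

```
package main

import "fmt"

// Stand-ins for scpb.Status, scpb.Target, scpb.TargetState and
// scpb.CurrentState; the real types are protobuf-generated.
type status int

const (
	absent status = iota
	public
)

type target struct {
	element      string // e.g. "PrimaryIndex 2 on table foo"
	targetStatus status // where the schema change wants to end up
}

// targetState is fixed for the lifetime of the schema change job.
type targetState struct {
	statements []string
	targets    []target
}

// currentState pairs the immutable targets with the per-target statuses,
// which is the only part that advances from stage to stage.
type currentState struct {
	targetState
	current []status // current[i] is the status of targets[i]
}

func (cs currentState) String() string {
	s := ""
	for i, t := range cs.targets {
		s += fmt.Sprintf("%-25s current=%d target=%d\n", t.element, cs.current[i], t.targetStatus)
	}
	return s
}

func main() {
	cs := currentState{
		targetState: targetState{
			statements: []string{"ALTER TABLE foo ADD COLUMN j INT"},
			targets: []target{
				{element: "Column j", targetStatus: public},
				{element: "old PrimaryIndex 1", targetStatus: absent},
			},
		},
		current: []status{absent, public},
	}
	fmt.Print(cs)
}
```

Advancing a stage only produces a new statuses slice while the targets stay put, which is presumably why the job progress in this patch stores just Current.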
type CreateDeclarativeSchemaChangerJob struct { mutationOp - JobID jobspb.JobID - State scpb.State + JobID jobspb.JobID + TargetState scpb.TargetState + Current []scpb.Status } // UpdateSchemaChangerJob is used to update the progress and payload of the @@ -359,6 +363,47 @@ type CreateDeclarativeSchemaChangerJob struct { type UpdateSchemaChangerJob struct { mutationOp JobID jobspb.JobID - Statuses []scpb.Status + Current []scpb.Status IsNonCancelable bool } + +// RemoveTableComment is used to delete a comment associated with a table. +type RemoveTableComment struct { + mutationOp + TableID descpb.ID +} + +// RemoveDatabaseComment is used to delete a comment associated with a database. +type RemoveDatabaseComment struct { + mutationOp + DatabaseID descpb.ID +} + +// RemoveSchemaComment is used to delete a comment associated with a schema. +type RemoveSchemaComment struct { + mutationOp + SchemaID descpb.ID +} + +// RemoveIndexComment is used to delete a comment associated with an index. +type RemoveIndexComment struct { + mutationOp + TableID descpb.ID + IndexID descpb.IndexID +} + +// RemoveColumnComment is used to delete a comment associated with a column. +type RemoveColumnComment struct { + mutationOp + TableID descpb.ID + ColumnID descpb.ColumnID +} + +// RemoveConstraintComment is used to delete a comment associated with a +// constraint. +type RemoveConstraintComment struct { + mutationOp + TableID descpb.ID + ConstraintType scpb.ConstraintType + ConstraintName string +} diff --git a/pkg/sql/schemachanger/scop/mutation_visitor_generated.go b/pkg/sql/schemachanger/scop/mutation_visitor_generated.go index a7c9f0bd26f8..59328986c4ae 100644 --- a/pkg/sql/schemachanger/scop/mutation_visitor_generated.go +++ b/pkg/sql/schemachanger/scop/mutation_visitor_generated.go @@ -62,6 +62,12 @@ type MutationVisitor interface { AddJobReference(context.Context, AddJobReference) error CreateDeclarativeSchemaChangerJob(context.Context, CreateDeclarativeSchemaChangerJob) error UpdateSchemaChangerJob(context.Context, UpdateSchemaChangerJob) error + RemoveTableComment(context.Context, RemoveTableComment) error + RemoveDatabaseComment(context.Context, RemoveDatabaseComment) error + RemoveSchemaComment(context.Context, RemoveSchemaComment) error + RemoveIndexComment(context.Context, RemoveIndexComment) error + RemoveColumnComment(context.Context, RemoveColumnComment) error + RemoveConstraintComment(context.Context, RemoveConstraintComment) error } // Visit is part of the MutationOp interface. @@ -263,3 +269,33 @@ func (op CreateDeclarativeSchemaChangerJob) Visit(ctx context.Context, v Mutatio func (op UpdateSchemaChangerJob) Visit(ctx context.Context, v MutationVisitor) error { return v.UpdateSchemaChangerJob(ctx, op) } + +// Visit is part of the MutationOp interface. +func (op RemoveTableComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveTableComment(ctx, op) +} + +// Visit is part of the MutationOp interface. +func (op RemoveDatabaseComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveDatabaseComment(ctx, op) +} + +// Visit is part of the MutationOp interface. +func (op RemoveSchemaComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveSchemaComment(ctx, op) +} + +// Visit is part of the MutationOp interface. +func (op RemoveIndexComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveIndexComment(ctx, op) +} + +// Visit is part of the MutationOp interface. 
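Each new comment op in mutation_visitor_generated.go gets the same one-line Visit method, the usual double-dispatch hook that routes a concrete op to the matching MutationVisitor method. A minimal stand-alone sketch of the pattern, using a hypothetical op and visitor rather than the generated CockroachDB code:

```
package main

import (
	"context"
	"fmt"
)

// mutationOp and mutationVisitor stand in for scop.MutationOp and
// scop.MutationVisitor, trimmed down to a single operation.
type mutationOp interface {
	Visit(context.Context, mutationVisitor) error
}

type mutationVisitor interface {
	RemoveTableComment(context.Context, removeTableComment) error
}

// removeTableComment is the concrete op; Visit forwards to the visitor
// method of the same name, so callers can hold a []mutationOp and never
// type-switch themselves.
type removeTableComment struct{ tableID int64 }

func (op removeTableComment) Visit(ctx context.Context, v mutationVisitor) error {
	return v.RemoveTableComment(ctx, op)
}

// printingVisitor plays the role of scmutationexec's visitor.
type printingVisitor struct{}

func (printingVisitor) RemoveTableComment(_ context.Context, op removeTableComment) error {
	fmt.Printf("queue comment deletion for table %d\n", op.tableID)
	return nil
}

func main() {
	ops := []mutationOp{removeTableComment{tableID: 104}}
	for _, op := range ops {
		if err := op.Visit(context.Background(), printingVisitor{}); err != nil {
			panic(err)
		}
	}
}
```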
+func (op RemoveColumnComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveColumnComment(ctx, op) +} + +// Visit is part of the MutationOp interface. +func (op RemoveConstraintComment) Visit(ctx context.Context, v MutationVisitor) error { + return v.RemoveConstraintComment(ctx, op) +} diff --git a/pkg/sql/schemachanger/scpb/BUILD.bazel b/pkg/sql/schemachanger/scpb/BUILD.bazel index ea5032f49ee1..b213581d521c 100644 --- a/pkg/sql/schemachanger/scpb/BUILD.bazel +++ b/pkg/sql/schemachanger/scpb/BUILD.bazel @@ -5,8 +5,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") go_library( name = "scpb", srcs = [ - "metadata.go", - "node.go", + "constants.go", + "state.go", ":gen-elements-interface", # keep ], embed = [":scpb_go_proto"], @@ -25,7 +25,9 @@ go_proto_library( proto = ":scpb_proto", visibility = ["//visibility:public"], deps = [ + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", + "//pkg/sql/sem/catid", # keep "//pkg/sql/types", "@com_github_gogo_protobuf//gogoproto", ], @@ -33,10 +35,14 @@ go_proto_library( proto_library( name = "scpb_proto", - srcs = ["scpb.proto"], + srcs = [ + "elements.proto", + "scpb.proto", + ], strip_import_prefix = "/pkg", visibility = ["//visibility:public"], deps = [ + "//pkg/sql/catalog/catpb:catpb_proto", "//pkg/sql/catalog/descpb:descpb_proto", "//pkg/sql/types:types_proto", "@com_github_gogo_protobuf//gogoproto:gogo_proto", @@ -53,7 +59,7 @@ go_binary( genrule( name = "gen-elements-interface", srcs = [ - "scpb.proto", + "elements.proto", ], outs = ["elements_generated.go"], cmd = """ diff --git a/pkg/sql/execinfra/scanbase.go b/pkg/sql/schemachanger/scpb/constants.go similarity index 50% rename from pkg/sql/execinfra/scanbase.go rename to pkg/sql/schemachanger/scpb/constants.go index 50bbb5eb6ce2..8c135a9df96c 100644 --- a/pkg/sql/execinfra/scanbase.go +++ b/pkg/sql/schemachanger/scpb/constants.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Cockroach Authors. +// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,12 +8,10 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package execinfra +package scpb -import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" - -// Prettier aliases for execinfrapb.ScanVisibility values. const ( - ScanVisibilityPublic = execinfrapb.ScanVisibility_PUBLIC - ScanVisibilityPublicAndNotPublic = execinfrapb.ScanVisibility_PUBLIC_AND_NOT_PUBLIC + // PlaceHolderComment placeholder string for non-fetched comments. + PlaceHolderComment string = "TODO(fqazi) Comments are not currently fetched " + + "from system.comments when doing decomposition" ) diff --git a/pkg/sql/schemachanger/scpb/element_generator.go b/pkg/sql/schemachanger/scpb/element_generator.go index e38171b0ee6d..fc5daa08f79f 100644 --- a/pkg/sql/schemachanger/scpb/element_generator.go +++ b/pkg/sql/schemachanger/scpb/element_generator.go @@ -62,8 +62,8 @@ func run(in, out string) error { package scpb -type NodeIterator interface { - ForEachNode(fn func(status, targetStatus Status, elem Element)) +type ElementStatusIterator interface { + ForEachElementStatus(fn func(status, targetStatus Status, elem Element)) } {{ range . }} @@ -71,9 +71,9 @@ func (e {{ . }}) element() {} // ForEach{{ . }} iterates over nodes of type {{ . }}. func ForEach{{ . }}( - b NodeIterator, elementFunc func(status, targetStatus Status, element *{{ . 
}}), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *{{ . }}), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*{{ . }}); ok { elementFunc(status, targetStatus, e) } diff --git a/pkg/sql/schemachanger/scpb/elements.proto b/pkg/sql/schemachanger/scpb/elements.proto new file mode 100644 index 000000000000..f5cbb0e5bb38 --- /dev/null +++ b/pkg/sql/schemachanger/scpb/elements.proto @@ -0,0 +1,408 @@ +// Copyright 2020 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +syntax = "proto3"; +package cockroach.sql.schemachanger.scpb; +option go_package = "scpb"; + +import "sql/catalog/catpb/catalog.proto"; +import "sql/catalog/descpb/privilege.proto"; +import "sql/types/types.proto"; +import "gogoproto/gogo.proto"; + +message ElementProto { + option (gogoproto.onlyone) = true; + Column column = 1 [(gogoproto.moretags) = "parent:\"Table\""]; + PrimaryIndex primary_index = 2 [(gogoproto.moretags) = "parent:\"Table\""]; + SecondaryIndex secondary_index = 3 [(gogoproto.moretags) = "parent:\"Table\""]; + SequenceDependency sequence_dependency = 4 [(gogoproto.moretags) = "parent:\"Column, Table\""]; + UniqueConstraint unique_constraint = 5 [(gogoproto.moretags) = "parent:\"SecondaryIndex, Table\""]; + CheckConstraint check_constraint = 6 [(gogoproto.moretags) = "parent:\"Table\""]; + Sequence sequence = 7; + DefaultExpression default_expression = 8 [(gogoproto.moretags) = "parent:\"Column\""]; + View view = 9; + Table table = 10; + ForeignKey outForeignKey = 11 [(gogoproto.moretags) = "parent:\"Table\""]; + ForeignKeyBackReference inForeignKey = 12 [(gogoproto.moretags) = "parent:\"Table\""]; + RelationDependedOnBy relationDependedOnBy = 13 [(gogoproto.moretags) = "parent:\"Table, View\""]; + SequenceOwnedBy sequenceOwner = 45 [(gogoproto.moretags) = "parent:\"Column, Sequence\""]; + Type type = 15; + Schema schema = 16; + Database database = 17; + Partitioning partitioning = 18 [(gogoproto.moretags) = "parent:\"PrimaryIndex, SecondaryIndex\""]; + Namespace namespace = 19 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema, Type\""]; + Owner owner = 20 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema\""]; + UserPrivileges UserPrivileges = 21 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema\""]; + ColumnName columnName = 22 [(gogoproto.moretags) = "parent:\"Column\""]; + Locality locality = 23 [(gogoproto.moretags) = "parent:\"Table\""]; + IndexName indexName = 24 [(gogoproto.moretags) = "parent:\"PrimaryIndex, SecondaryIndex\""]; + ConstraintName constraintName = 25 [(gogoproto.moretags) = "parent:\"UniqueConstraint, CheckConstraint\""]; + DefaultExprTypeReference defaultExprTypeRef = 26 [(gogoproto.moretags) = "parent:\"Column, Type\""]; + OnUpdateExprTypeReference onUpdateTypeReference = 27 [(gogoproto.moretags) = "parent:\"Column, Type\""]; + ComputedExprTypeReference computedExprTypeReference = 28 [(gogoproto.moretags) = "parent:\"Column, Type\""]; + ViewDependsOnType viewDependsOnType = 29 [(gogoproto.moretags) = "parent:\"View, Type\""]; + ColumnTypeReference 
columnTypeReference = 30 [(gogoproto.moretags) = "parent:\"Column, Type\""]; + DatabaseSchemaEntry schemaEntry = 31 [(gogoproto.moretags) = "parent:\"Database, Schema\""]; + CheckConstraintTypeReference checkConstraintTypeReference = 32 [(gogoproto.moretags) = "parent:\"Table, Type\""]; + TableComment tableComment = 33 [(gogoproto.moretags) = "parent:\"Table, View, Sequence\""]; + DatabaseComment databaseComment = 35 [(gogoproto.moretags) = "parent:\"Database\""]; + SchemaComment schemaComment = 36 [(gogoproto.moretags) = "parent:\"Schema\""]; + IndexComment indexComment = 37 [(gogoproto.moretags) = "parent:\"Index\""]; + ColumnComment columnComment = 38 [(gogoproto.moretags) = "parent:\"Column\""]; + ConstraintComment constraintComment = 39 [(gogoproto.moretags) = "parent:\"PrimaryIndex, SecondaryIndex, ForeignKey, UniqueConstraint, CheckConstraint\""]; +} + +message Column { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 3 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 family_id = 4 [(gogoproto.customname) = "FamilyID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.FamilyID"]; + string family_name = 5; + sql.sem.types.T type = 6; + bool nullable = 7; + string default_expr = 8 [(gogoproto.customname) = "DefaultExpr"]; + string on_update_expr = 9 [(gogoproto.customname) = "OnUpdateExpr"]; + bool hidden = 10; + bool inaccessible = 11; + uint32 generated_as_identity_type = 12 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.GeneratedAsIdentityType"]; + string generated_as_identity_sequence_option = 13; + repeated uint32 uses_sequence_ids = 14 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string computerExpr = 15; + uint32 pg_attribute_num = 16 [(gogoproto.customname) = "PgAttributeNum"]; + uint32 system_column_kind = 17 [(gogoproto.customname) = "SystemColumnKind", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.SystemColumnKind"]; + bool virtual = 18; +} + +message PrimaryIndex { + // The direction of a column in the index. 
+ enum Direction { + ASC = 0; + DESC = 1; + } + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; + bool unique = 3 [(gogoproto.customname) = "Unique"]; + repeated uint32 key_column_ids = 4 [(gogoproto.customname) = "KeyColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + repeated Direction key_column_direction = 5 [(gogoproto.customname) = "KeyColumnDirections"]; + repeated uint32 key_suffix_column_ids = 6 [(gogoproto.customname) = "KeySuffixColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + cockroach.sql.catalog.catpb.ShardedDescriptor sharded_descriptor = 7 [(gogoproto.customname) = "ShardedDescriptor"]; + repeated uint32 storing_column_ids = 8 [(gogoproto.customname) = "StoringColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + repeated uint32 composite_column_ids = 9 [(gogoproto.customname) = "CompositeColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + bool inverted = 10 [(gogoproto.customname) = "Inverted"]; + bool concurrently = 11 [(gogoproto.customname) = "Concurrently"]; + + // SourceIndexID refers to the primary index which will be used to + // to backfill this index. + uint32 source_index_id = 12 [(gogoproto.customname) = "SourceIndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; +} + +message SecondaryIndex { + // The direction of a column in the index. + enum Direction { + ASC = 0; + DESC = 1; + } + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; + bool unique = 3 [(gogoproto.customname) = "Unique"]; + repeated uint32 key_column_ids = 4 [(gogoproto.customname) = "KeyColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + repeated Direction key_column_direction = 5 [(gogoproto.customname) = "KeyColumnDirections"]; + repeated uint32 key_suffix_column_ids = 6 [(gogoproto.customname) = "KeySuffixColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + cockroach.sql.catalog.catpb.ShardedDescriptor sharded_descriptor = 7 [(gogoproto.customname) = "ShardedDescriptor"]; + repeated uint32 storing_column_ids = 8 [(gogoproto.customname) = "StoringColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + repeated uint32 composite_column_ids = 9 [(gogoproto.customname) = "CompositeColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + bool inverted = 10 [(gogoproto.customname) = "Inverted"]; + bool concurrently = 11 [(gogoproto.customname) = "Concurrently"]; + + // SourceIndexID refers to the primary index which will be used to + // to backfill this index. 
+ uint32 source_index_id = 12 [(gogoproto.customname) = "SourceIndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; +} + +message SequenceDependency { + option (gogoproto.equal) = true; + + enum Type { + UNKNOWN = 0; + USES = 1; + OWNS = 2; + } + + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 sequence_id = 3 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + Type type = 4; +} + +message UniqueConstraint { + option (gogoproto.equal) = true; + ConstraintType constraint_type = 1; + uint32 constraint_ordinal = 2; + uint32 table_id = 3 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 index_id = 4 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; + repeated uint32 column_ids = 5 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; +} + +message CheckConstraint { + option (gogoproto.equal) = true; + ConstraintType constraint_type = 1; + uint32 constraint_ordinal = 2; + uint32 table_id = 3 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string name = 4; + string expr = 5; + repeated uint32 column_ids = 6 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + bool validated = 7; +} + +message Sequence { + option (gogoproto.equal) = true; + uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message DefaultExpression { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + repeated uint32 usesSequenceIDs =3 [(gogoproto.customname) = "UsesSequenceIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string default_expr = 4; +} + +message View { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message Table { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message OnUpdateExprTypeReference { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message ComputedExprTypeReference 
{ + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message DefaultExprTypeReference { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message ColumnTypeReference { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message CheckConstraintTypeReference { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 constraint_ordinal = 2; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message ViewDependsOnType { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message ForeignKey { + option (gogoproto.equal) = true; + uint32 origin_id = 1 [(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 origin_columns = 3 [(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 on_update = 6 [(gogoproto.customname) = "OnUpdate", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.ForeignKeyAction"]; + uint32 on_delete = 7 [(gogoproto.customname) = "OnDelete", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.ForeignKeyAction"]; + string name = 8; +} + +message ForeignKeyBackReference { + option (gogoproto.equal) = true; + uint32 origin_id = 1 [(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 origin_columns = 3 
[(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + uint32 on_update = 6 [(gogoproto.customname) = "OnUpdate", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.ForeignKeyAction"]; + uint32 on_delete = 7 [(gogoproto.customname) = "OnDelete", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.ForeignKeyAction"]; + string name = 8; +} + +message SequenceOwnedBy { + uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 owner_table_id = 2 [(gogoproto.customname) = "OwnerTableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message RelationDependedOnBy { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 dependedOn = 2 [(gogoproto.customname) = "DependedOnBy", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 columnID = 3 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; +} + +message Type { + uint32 type_id = 1 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message Schema { + uint32 schema_id = 1 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 dependentObjects = 3 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message Database { + uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + repeated uint32 dependentObjects = 3 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message ListPartition { + option (gogoproto.equal) = true; + string name = 1 [(gogoproto.customname) = "Name"]; + repeated string expr = 2 [(gogoproto.customname) = "Expr"]; +} + +message RangePartitions { + option (gogoproto.equal) = true; + string name = 1 [(gogoproto.customname) = "Name"]; + repeated string To = 2 [(gogoproto.customname) = "To"]; + repeated string From = 3 [(gogoproto.customname) = "From"]; +} + +message Partitioning { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; + repeated string fields = 3 [(gogoproto.customname) = "Fields"]; + repeated ListPartition list_partitions = 4 [(gogoproto.customname) = "ListPartitions"]; + repeated RangePartitions range_partitions = 5 [(gogoproto.customname) = "RangePartitions"]; +} + +message Namespace { + uint32 
database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 schema_id = 2 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 descriptor_id = 3[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string name = 4; +} + +message Owner { + uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string owner = 2; +} + +message UserPrivileges { + uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + string username = 2; + uint32 privileges = 3; +} + +message Locality { + uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + cockroach.sql.catalog.catpb.LocalityConfig Locality = 2 [(gogoproto.customname) = "Locality", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb.LocalityConfig"]; +} + +message ColumnName { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.ColumnID"]; + string name = 3; +} + +message IndexName { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.IndexID"]; + string name = 3; +} + +enum ConstraintType { + Invalid = 0; + UniqueWithoutIndex = 1; + Check = 2; + FK = 3; + PrimaryKey = 4; +} + +message ConstraintName { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + ConstraintType constraint_type = 2; + uint32 constraint_ordinal = 3; + string name = 4; +} + + +message DefaultPrivilege { + uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + message DefaultObjectPrivilege{ + repeated UserPrivileges privileges = 1; + } + oneof role { + cockroach.sql.sqlbase.DefaultPrivilegesForRole.ExplicitRole explicit_role = 2; + cockroach.sql.sqlbase.DefaultPrivilegesForRole.ForAllRolesPseudoRole for_all_roles = 3; + } + map privileges_per_object = 4; +} + +message DatabaseSchemaEntry { + uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; + uint32 schema_id = 2 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid.DescID"]; +} + +message TableComment { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + string comment = 2; +} + +message DatabaseComment { + option (gogoproto.equal) = true; + uint32 database_id = 1 
[(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + string comment = 2; +} +message SchemaComment { + option (gogoproto.equal) = true; + uint32 schema_id = 1 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + string comment = 2; +} + +message IndexComment { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; + string comment = 3; +} + +message ColumnComment { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; + string comment = 3; +} + +message ConstraintComment { + option (gogoproto.equal) = true; + uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; + string constraint_name = 2; + ConstraintType constraint_type = 3; + string comment = 4; +} diff --git a/pkg/sql/schemachanger/scpb/elements_generated.go b/pkg/sql/schemachanger/scpb/elements_generated.go index 33c9c6a4d156..6e5988ecc20a 100755 --- a/pkg/sql/schemachanger/scpb/elements_generated.go +++ b/pkg/sql/schemachanger/scpb/elements_generated.go @@ -12,8 +12,8 @@ package scpb -type NodeIterator interface { - ForEachNode(fn func(status, targetStatus Status, elem Element)) +type ElementStatusIterator interface { + ForEachElementStatus(fn func(status, targetStatus Status, elem Element)) } @@ -21,9 +21,9 @@ func (e Column) element() {} // ForEachColumn iterates over nodes of type Column. func ForEachColumn( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Column), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Column), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Column); ok { elementFunc(status, targetStatus, e) } @@ -34,9 +34,9 @@ func (e PrimaryIndex) element() {} // ForEachPrimaryIndex iterates over nodes of type PrimaryIndex. func ForEachPrimaryIndex( - b NodeIterator, elementFunc func(status, targetStatus Status, element *PrimaryIndex), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *PrimaryIndex), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*PrimaryIndex); ok { elementFunc(status, targetStatus, e) } @@ -47,9 +47,9 @@ func (e SecondaryIndex) element() {} // ForEachSecondaryIndex iterates over nodes of type SecondaryIndex. 
func ForEachSecondaryIndex( - b NodeIterator, elementFunc func(status, targetStatus Status, element *SecondaryIndex), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *SecondaryIndex), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*SecondaryIndex); ok { elementFunc(status, targetStatus, e) } @@ -60,9 +60,9 @@ func (e SequenceDependency) element() {} // ForEachSequenceDependency iterates over nodes of type SequenceDependency. func ForEachSequenceDependency( - b NodeIterator, elementFunc func(status, targetStatus Status, element *SequenceDependency), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *SequenceDependency), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*SequenceDependency); ok { elementFunc(status, targetStatus, e) } @@ -73,9 +73,9 @@ func (e UniqueConstraint) element() {} // ForEachUniqueConstraint iterates over nodes of type UniqueConstraint. func ForEachUniqueConstraint( - b NodeIterator, elementFunc func(status, targetStatus Status, element *UniqueConstraint), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *UniqueConstraint), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*UniqueConstraint); ok { elementFunc(status, targetStatus, e) } @@ -86,9 +86,9 @@ func (e CheckConstraint) element() {} // ForEachCheckConstraint iterates over nodes of type CheckConstraint. func ForEachCheckConstraint( - b NodeIterator, elementFunc func(status, targetStatus Status, element *CheckConstraint), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *CheckConstraint), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*CheckConstraint); ok { elementFunc(status, targetStatus, e) } @@ -99,9 +99,9 @@ func (e Sequence) element() {} // ForEachSequence iterates over nodes of type Sequence. func ForEachSequence( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Sequence), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Sequence), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Sequence); ok { elementFunc(status, targetStatus, e) } @@ -112,9 +112,9 @@ func (e DefaultExpression) element() {} // ForEachDefaultExpression iterates over nodes of type DefaultExpression. func ForEachDefaultExpression( - b NodeIterator, elementFunc func(status, targetStatus Status, element *DefaultExpression), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *DefaultExpression), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*DefaultExpression); ok { elementFunc(status, targetStatus, e) } @@ -125,9 +125,9 @@ func (e View) element() {} // ForEachView iterates over nodes of type View. 
func ForEachView( - b NodeIterator, elementFunc func(status, targetStatus Status, element *View), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *View), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*View); ok { elementFunc(status, targetStatus, e) } @@ -138,9 +138,9 @@ func (e Table) element() {} // ForEachTable iterates over nodes of type Table. func ForEachTable( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Table), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Table), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Table); ok { elementFunc(status, targetStatus, e) } @@ -151,9 +151,9 @@ func (e ForeignKey) element() {} // ForEachForeignKey iterates over nodes of type ForeignKey. func ForEachForeignKey( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ForeignKey), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ForeignKey), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ForeignKey); ok { elementFunc(status, targetStatus, e) } @@ -164,9 +164,9 @@ func (e ForeignKeyBackReference) element() {} // ForEachForeignKeyBackReference iterates over nodes of type ForeignKeyBackReference. func ForEachForeignKeyBackReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ForeignKeyBackReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ForeignKeyBackReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ForeignKeyBackReference); ok { elementFunc(status, targetStatus, e) } @@ -177,9 +177,9 @@ func (e RelationDependedOnBy) element() {} // ForEachRelationDependedOnBy iterates over nodes of type RelationDependedOnBy. func ForEachRelationDependedOnBy( - b NodeIterator, elementFunc func(status, targetStatus Status, element *RelationDependedOnBy), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *RelationDependedOnBy), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*RelationDependedOnBy); ok { elementFunc(status, targetStatus, e) } @@ -190,9 +190,9 @@ func (e SequenceOwnedBy) element() {} // ForEachSequenceOwnedBy iterates over nodes of type SequenceOwnedBy. func ForEachSequenceOwnedBy( - b NodeIterator, elementFunc func(status, targetStatus Status, element *SequenceOwnedBy), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *SequenceOwnedBy), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*SequenceOwnedBy); ok { elementFunc(status, targetStatus, e) } @@ -203,9 +203,9 @@ func (e Type) element() {} // ForEachType iterates over nodes of type Type. 
func ForEachType( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Type), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Type), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Type); ok { elementFunc(status, targetStatus, e) } @@ -216,9 +216,9 @@ func (e Schema) element() {} // ForEachSchema iterates over nodes of type Schema. func ForEachSchema( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Schema), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Schema), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Schema); ok { elementFunc(status, targetStatus, e) } @@ -229,9 +229,9 @@ func (e Database) element() {} // ForEachDatabase iterates over nodes of type Database. func ForEachDatabase( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Database), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Database), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Database); ok { elementFunc(status, targetStatus, e) } @@ -242,9 +242,9 @@ func (e Partitioning) element() {} // ForEachPartitioning iterates over nodes of type Partitioning. func ForEachPartitioning( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Partitioning), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Partitioning), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Partitioning); ok { elementFunc(status, targetStatus, e) } @@ -255,9 +255,9 @@ func (e Namespace) element() {} // ForEachNamespace iterates over nodes of type Namespace. func ForEachNamespace( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Namespace), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Namespace), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Namespace); ok { elementFunc(status, targetStatus, e) } @@ -268,9 +268,9 @@ func (e Owner) element() {} // ForEachOwner iterates over nodes of type Owner. func ForEachOwner( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Owner), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Owner), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Owner); ok { elementFunc(status, targetStatus, e) } @@ -281,9 +281,9 @@ func (e UserPrivileges) element() {} // ForEachUserPrivileges iterates over nodes of type UserPrivileges. 
func ForEachUserPrivileges( - b NodeIterator, elementFunc func(status, targetStatus Status, element *UserPrivileges), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *UserPrivileges), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*UserPrivileges); ok { elementFunc(status, targetStatus, e) } @@ -294,9 +294,9 @@ func (e ColumnName) element() {} // ForEachColumnName iterates over nodes of type ColumnName. func ForEachColumnName( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ColumnName), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ColumnName), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ColumnName); ok { elementFunc(status, targetStatus, e) } @@ -307,9 +307,9 @@ func (e Locality) element() {} // ForEachLocality iterates over nodes of type Locality. func ForEachLocality( - b NodeIterator, elementFunc func(status, targetStatus Status, element *Locality), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *Locality), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*Locality); ok { elementFunc(status, targetStatus, e) } @@ -320,9 +320,9 @@ func (e IndexName) element() {} // ForEachIndexName iterates over nodes of type IndexName. func ForEachIndexName( - b NodeIterator, elementFunc func(status, targetStatus Status, element *IndexName), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *IndexName), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*IndexName); ok { elementFunc(status, targetStatus, e) } @@ -333,9 +333,9 @@ func (e ConstraintName) element() {} // ForEachConstraintName iterates over nodes of type ConstraintName. func ForEachConstraintName( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ConstraintName), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ConstraintName), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ConstraintName); ok { elementFunc(status, targetStatus, e) } @@ -346,9 +346,9 @@ func (e DefaultExprTypeReference) element() {} // ForEachDefaultExprTypeReference iterates over nodes of type DefaultExprTypeReference. func ForEachDefaultExprTypeReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *DefaultExprTypeReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *DefaultExprTypeReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*DefaultExprTypeReference); ok { elementFunc(status, targetStatus, e) } @@ -359,9 +359,9 @@ func (e OnUpdateExprTypeReference) element() {} // ForEachOnUpdateExprTypeReference iterates over nodes of type OnUpdateExprTypeReference. 
func ForEachOnUpdateExprTypeReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *OnUpdateExprTypeReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *OnUpdateExprTypeReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*OnUpdateExprTypeReference); ok { elementFunc(status, targetStatus, e) } @@ -372,9 +372,9 @@ func (e ComputedExprTypeReference) element() {} // ForEachComputedExprTypeReference iterates over nodes of type ComputedExprTypeReference. func ForEachComputedExprTypeReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ComputedExprTypeReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ComputedExprTypeReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ComputedExprTypeReference); ok { elementFunc(status, targetStatus, e) } @@ -385,9 +385,9 @@ func (e ViewDependsOnType) element() {} // ForEachViewDependsOnType iterates over nodes of type ViewDependsOnType. func ForEachViewDependsOnType( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ViewDependsOnType), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ViewDependsOnType), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ViewDependsOnType); ok { elementFunc(status, targetStatus, e) } @@ -398,9 +398,9 @@ func (e ColumnTypeReference) element() {} // ForEachColumnTypeReference iterates over nodes of type ColumnTypeReference. func ForEachColumnTypeReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *ColumnTypeReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ColumnTypeReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*ColumnTypeReference); ok { elementFunc(status, targetStatus, e) } @@ -411,9 +411,9 @@ func (e DatabaseSchemaEntry) element() {} // ForEachDatabaseSchemaEntry iterates over nodes of type DatabaseSchemaEntry. func ForEachDatabaseSchemaEntry( - b NodeIterator, elementFunc func(status, targetStatus Status, element *DatabaseSchemaEntry), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *DatabaseSchemaEntry), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*DatabaseSchemaEntry); ok { elementFunc(status, targetStatus, e) } @@ -424,11 +424,89 @@ func (e CheckConstraintTypeReference) element() {} // ForEachCheckConstraintTypeReference iterates over nodes of type CheckConstraintTypeReference. 
func ForEachCheckConstraintTypeReference( - b NodeIterator, elementFunc func(status, targetStatus Status, element *CheckConstraintTypeReference), + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *CheckConstraintTypeReference), ) { - b.ForEachNode(func(status, targetStatus Status, elem Element) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { if e, ok := elem.(*CheckConstraintTypeReference); ok { elementFunc(status, targetStatus, e) } }) +} + +func (e TableComment) element() {} + +// ForEachTableComment iterates over nodes of type TableComment. +func ForEachTableComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *TableComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*TableComment); ok { + elementFunc(status, targetStatus, e) + } + }) +} + +func (e DatabaseComment) element() {} + +// ForEachDatabaseComment iterates over nodes of type DatabaseComment. +func ForEachDatabaseComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *DatabaseComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*DatabaseComment); ok { + elementFunc(status, targetStatus, e) + } + }) +} + +func (e SchemaComment) element() {} + +// ForEachSchemaComment iterates over nodes of type SchemaComment. +func ForEachSchemaComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *SchemaComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*SchemaComment); ok { + elementFunc(status, targetStatus, e) + } + }) +} + +func (e IndexComment) element() {} + +// ForEachIndexComment iterates over nodes of type IndexComment. +func ForEachIndexComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *IndexComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*IndexComment); ok { + elementFunc(status, targetStatus, e) + } + }) +} + +func (e ColumnComment) element() {} + +// ForEachColumnComment iterates over nodes of type ColumnComment. +func ForEachColumnComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ColumnComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*ColumnComment); ok { + elementFunc(status, targetStatus, e) + } + }) +} + +func (e ConstraintComment) element() {} + +// ForEachConstraintComment iterates over nodes of type ConstraintComment. +func ForEachConstraintComment( + b ElementStatusIterator, elementFunc func(status, targetStatus Status, element *ConstraintComment), +) { + b.ForEachElementStatus(func(status, targetStatus Status, elem Element) { + if e, ok := elem.(*ConstraintComment); ok { + elementFunc(status, targetStatus, e) + } + }) } \ No newline at end of file diff --git a/pkg/sql/schemachanger/scpb/metadata.go b/pkg/sql/schemachanger/scpb/metadata.go deleted file mode 100644 index 581034b19e43..000000000000 --- a/pkg/sql/schemachanger/scpb/metadata.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package scpb - -import "github.com/cockroachdb/cockroach/pkg/util/protoutil" - -// SourceElementID elements ID's for identifying parent elements. -// This ID is dynamically allocated when any parent element is -// created and has no relation to the descriptor ID. -type SourceElementID uint32 - -// ElementMetadata contains materialized metadata for an element, -// where references inside the TargetMetadata are resolved to -// their actual values. This structure is mainly used during opgen -// where we need to know these values to emit event log entries for -// example. -type ElementMetadata struct { - TargetMetadata - Username string - AppName string - Statement string -} - -// Clone clones a State and any associated metadata (i.e. statement and -//authorization -// information) for that state. -func (s *State) Clone() State { - clone := State{ - Nodes: make([]*Node, len(s.Nodes)), - Statements: make([]*Statement, len(s.Statements)), - } - for i, n := range s.Nodes { - clone.Nodes[i] = &Node{ - Target: protoutil.Clone(n.Target).(*Target), - Status: n.Status, - } - } - for i, n := range s.Statements { - clone.Statements[i] = &Statement{ - Statement: n.Statement, - } - } - clone.Authorization.Username = s.Authorization.Username - clone.Authorization.AppName = s.Authorization.AppName - return clone -} diff --git a/pkg/sql/schemachanger/scpb/scpb.proto b/pkg/sql/schemachanger/scpb/scpb.proto index f085518e9d54..4aa525660fcd 100644 --- a/pkg/sql/schemachanger/scpb/scpb.proto +++ b/pkg/sql/schemachanger/scpb/scpb.proto @@ -12,50 +12,12 @@ syntax = "proto3"; package cockroach.sql.schemachanger.scpb; option go_package = "scpb"; -import "sql/catalog/descpb/structured.proto"; -import "sql/types/types.proto"; -import "sql/catalog/descpb/privilege.proto"; +import "sql/schemachanger/scpb/elements.proto"; import "gogoproto/gogo.proto"; -message ElementProto { - option (gogoproto.onlyone) = true; - Column column = 1 [(gogoproto.moretags) = "parent:\"Table\""]; - PrimaryIndex primary_index = 2 [(gogoproto.moretags) = "parent:\"Table\""]; - SecondaryIndex secondary_index = 3 [(gogoproto.moretags) = "parent:\"Table\""]; - SequenceDependency sequence_dependency = 4 [(gogoproto.moretags) = "parent:\"Column, Table\""]; - UniqueConstraint unique_constraint = 5 [(gogoproto.moretags) = "parent:\"SecondaryIndex, Table\""]; - CheckConstraint check_constraint = 6 [(gogoproto.moretags) = "parent:\"Table\""]; - Sequence sequence = 7; - DefaultExpression default_expression = 8 [(gogoproto.moretags) = "parent:\"Column\""]; - View view = 9; - Table table = 10; - ForeignKey outForeignKey = 11 [(gogoproto.moretags) = "parent:\"Table\""]; - ForeignKeyBackReference inForeignKey = 12 [(gogoproto.moretags) = "parent:\"Table\""]; - RelationDependedOnBy relationDependedOnBy = 13 [(gogoproto.moretags) = "parent:\"Table, View\""]; - SequenceOwnedBy sequenceOwner = 45 [(gogoproto.moretags) = "parent:\"Column, Sequence\""]; - Type type = 15; - Schema schema = 16; - Database database = 17; - Partitioning partitioning = 18 [(gogoproto.moretags) = "parent:\"PrimaryIndex, SecondaryIndex\""]; - Namespace namespace = 19 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema, Type\""]; - Owner owner = 20 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema\""]; - 
UserPrivileges UserPrivileges = 21 [(gogoproto.moretags) = "parent:\"Table, View, Sequence, Database, Schema\""]; - ColumnName columnName = 22 [(gogoproto.moretags) = "parent:\"Column\""]; - Locality locality = 23 [(gogoproto.moretags) = "parent:\"Table\""]; - IndexName indexName = 24 [(gogoproto.moretags) = "parent:\"PrimaryIndex, SecondaryIndex\""]; - ConstraintName constraintName = 25 [(gogoproto.moretags) = "parent:\"UniqueConstraint, CheckConstraint\""]; - DefaultExprTypeReference defaultExprTypeRef = 26 [(gogoproto.moretags) = "parent:\"Column, Type\""]; - OnUpdateExprTypeReference onUpdateTypeReference = 27 [(gogoproto.moretags) = "parent:\"Column, Type\""]; - ComputedExprTypeReference computedExprTypeReference = 28 [(gogoproto.moretags) = "parent:\"Column, Type\""]; - ViewDependsOnType viewDependsOnType = 29 [(gogoproto.moretags) = "parent:\"View, Type\""]; - ColumnTypeReference columnTypeReference = 30 [(gogoproto.moretags) = "parent:\"Column, Type\""]; - DatabaseSchemaEntry schemaEntry = 31 [(gogoproto.moretags) = "parent:\"Database, Schema\""]; - CheckConstraintTypeReference checkConstraintTypeReference = 32 [(gogoproto.moretags) = "parent:\"Table, Type\""]; -} - message Target { ElementProto element_proto = 1 [(gogoproto.embed) = true, (gogoproto.nullable) = false]; - TargetMetadata metadata = 2 [(gogoproto.nullable) = false]; + TargetMetadata metadata = 2 [(gogoproto.nullable) = false]; Status target_status = 3; } @@ -71,233 +33,7 @@ enum Status { PUBLIC = 8; } -message Column { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 3 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 family_id = 4 [(gogoproto.customname) = "FamilyID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.FamilyID"]; - string family_name = 5; - sql.sem.types.T type = 6; - bool nullable = 7; - string default_expr = 8 [(gogoproto.customname) = "DefaultExpr"]; - string on_update_expr = 9 [(gogoproto.customname) = "OnUpdateExpr"]; - bool hidden = 10; - bool inaccessible = 11; - uint32 generated_as_identity_type = 12 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.GeneratedAsIdentityType"]; - string generated_as_identity_sequence_option = 13; - repeated uint32 uses_sequence_ids = 14 [(gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string computerExpr = 15; - uint32 pg_attribute_num = 16 [(gogoproto.customname) = "PgAttributeNum"]; - uint32 system_column_kind = 17 [(gogoproto.customname) = "SystemColumnKind", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.SystemColumnKind"]; - bool virtual = 18; -} - -message PrimaryIndex { - // The direction of a column in the index. 
- enum Direction { - ASC = 0; - DESC = 1; - } - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; - bool unique = 3 [(gogoproto.customname) = "Unique"]; - repeated uint32 key_column_ids = 4 [(gogoproto.customname) = "KeyColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - repeated Direction key_column_direction = 5 [(gogoproto.customname) = "KeyColumnDirections"]; - repeated uint32 key_suffix_column_ids = 6 [(gogoproto.customname) = "KeySuffixColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - cockroach.sql.sqlbase.ShardedDescriptor sharded_descriptor = 7 [(gogoproto.customname) = "ShardedDescriptor"]; - repeated uint32 storing_column_ids = 8 [(gogoproto.customname) = "StoringColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - repeated uint32 composite_column_ids = 9 [(gogoproto.customname) = "CompositeColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - bool inverted = 10 [(gogoproto.customname) = "Inverted"]; - bool concurrently = 11 [(gogoproto.customname) = "Concurrently"]; - - // SourceIndexID refers to the primary index which will be used to - // to backfill this index. - uint32 source_index_id = 12 [(gogoproto.customname) = "SourceIndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; -} - -message SecondaryIndex { - // The direction of a column in the index. - enum Direction { - ASC = 0; - DESC = 1; - } - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; - bool unique = 3 [(gogoproto.customname) = "Unique"]; - repeated uint32 key_column_ids = 4 [(gogoproto.customname) = "KeyColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - repeated Direction key_column_direction = 5 [(gogoproto.customname) = "KeyColumnDirections"]; - repeated uint32 key_suffix_column_ids = 6 [(gogoproto.customname) = "KeySuffixColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - cockroach.sql.sqlbase.ShardedDescriptor sharded_descriptor = 7 [(gogoproto.customname) = "ShardedDescriptor"]; - repeated uint32 storing_column_ids = 8 [(gogoproto.customname) = "StoringColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - repeated uint32 composite_column_ids = 9 [(gogoproto.customname) = "CompositeColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - bool inverted = 10 [(gogoproto.customname) = "Inverted"]; - bool concurrently = 11 [(gogoproto.customname) = "Concurrently"]; - - // SourceIndexID refers to the primary index which will be used to - // to backfill this index. 
- uint32 source_index_id = 12 [(gogoproto.customname) = "SourceIndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; -} - -message SequenceDependency { - option (gogoproto.equal) = true; - - enum Type { - UNKNOWN = 0; - USES = 1; - OWNS = 2; - } - - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 sequence_id = 3 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - Type type = 4; -} - -message UniqueConstraint { - option (gogoproto.equal) = true; - ConstraintType constraint_type = 1; - uint32 constraint_ordinal = 2; - uint32 table_id = 3 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 index_id = 4 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; - repeated uint32 column_ids = 5 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; -} - -message CheckConstraint { - option (gogoproto.equal) = true; - ConstraintType constraint_type = 1; - uint32 constraint_ordinal = 2; - uint32 table_id = 3 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string name = 4; - string expr = 5; - repeated uint32 column_ids = 6 [(gogoproto.customname) = "ColumnIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - bool validated = 7; -} - -message Sequence { - option (gogoproto.equal) = true; - uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message DefaultExpression { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - repeated uint32 usesSequenceIDs =3 [(gogoproto.customname) = "UsesSequenceIDs", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string default_expr = 4; -} - -message View { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message Table { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message OnUpdateExprTypeReference { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message ComputedExprTypeReference { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message DefaultExprTypeReference { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message ColumnTypeReference { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message CheckConstraintTypeReference { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 constraint_ordinal = 2; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message ViewDependsOnType { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 type_id = 3 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message ForeignKey { - option (gogoproto.equal) = true; - uint32 origin_id = 1 [(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 origin_columns = 3 [(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 on_update = 6 [(gogoproto.customname) = "OnUpdate", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ForeignKeyReference_Action"]; - uint32 on_delete = 7 [(gogoproto.customname) = "OnDelete", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ForeignKeyReference_Action"]; - string name = 8; -} - -message ForeignKeyBackReference { - option (gogoproto.equal) = true; - uint32 origin_id = 1 
[(gogoproto.customname) = "OriginID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 origin_columns = 3 [(gogoproto.customname) = "OriginColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 reference_id = 4 [(gogoproto.customname) = "ReferenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 reference_columns = 5 [(gogoproto.customname) = "ReferenceColumns", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - uint32 on_update = 6 [(gogoproto.customname) = "OnUpdate", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ForeignKeyReference_Action"]; - uint32 on_delete = 7 [(gogoproto.customname) = "OnDelete", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ForeignKeyReference_Action"]; - string name = 8; -} - -message SequenceOwnedBy { - uint32 sequence_id = 1 [(gogoproto.customname) = "SequenceID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 owner_table_id = 2 [(gogoproto.customname) = "OwnerTableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message RelationDependedOnBy { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 dependedOn = 2 [(gogoproto.customname) = "DependedOnBy", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 columnID = 3 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; -} - -message Type { - uint32 type_id = 1 [(gogoproto.customname) = "TypeID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message Schema { - uint32 schema_id = 1 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 dependentObjects = 3 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message Database { - uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - repeated uint32 dependentObjects = 3 [(gogoproto.customname) = "DependentObjects", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; -} - -message Authorization { - string username = 1; - string appName = 2 [(gogoproto.customname) = "AppName"]; -} - -// Contains SQL statements for which a schema change is being executed. -message Statement { - string statement = 1; -} - -// TargetMetaData refers to the metadata for individual elements, where +// TargetMetadata refers to the metadata for individual elements, where // some fields like statement_id are indexes into the the full declarative // schema changer State. message TargetMetadata { @@ -305,7 +41,7 @@ message TargetMetadata { // target belongs too. For example, if multiple objects such as tables or // schemas are dropped in a single statement (i.e. such as DROP TABLE A, B) // then this counter will increment for each of those objects. 
- uint32 sub_work_id = 1 [(gogoproto.customname) = "SubWorkID"]; + uint32 sub_work_id = 1 [(gogoproto.customname) = "SubWorkID"]; // SourceElementID identifies the parent element responsible for generating // an element, which will be used to track cascaded drops. For example // if database is being dropped, then any schemas that are dropped will have @@ -317,93 +53,19 @@ message TargetMetadata { uint32 statement_id = 3 [(gogoproto.customname) = "StatementID"]; } -message ListPartition { - option (gogoproto.equal) = true; - string name = 1 [(gogoproto.customname) = "Name"]; - repeated string expr = 2 [(gogoproto.customname) = "Expr"]; -} - -message RangePartitions { - option (gogoproto.equal) = true; - string name = 1 [(gogoproto.customname) = "Name"]; - repeated string To = 2 [(gogoproto.customname) = "To"]; - repeated string From = 3 [(gogoproto.customname) = "From"]; -} - -message Partitioning { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; - repeated string fields = 3 [(gogoproto.customname) = "Fields"]; - repeated ListPartition list_partitions = 4 [(gogoproto.customname) = "ListPartitions"]; - repeated RangePartitions range_partitions = 5 [(gogoproto.customname) = "RangePartitions"]; +message TargetState { + repeated Target targets = 1 [(gogoproto.nullable) = false]; + repeated Statement statements = 2 [(gogoproto.nullable) = false]; + Authorization authorization = 3 [(gogoproto.nullable) = false]; } -message Namespace { - uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 schema_id = 2 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 descriptor_id = 3[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string name = 4; -} - -message Owner { - uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string owner = 2; -} - -message UserPrivileges { - uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - string username = 2; - uint32 privileges = 3; -} - -message Locality { - uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - cockroach.sql.sqlbase.TableDescriptor.LocalityConfig Locality = 2 [(gogoproto.customname) = "Locality", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.TableDescriptor_LocalityConfig"]; -} - -message ColumnName { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 column_id = 2 [(gogoproto.customname) = "ColumnID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ColumnID"]; - string name = 3; -} - -message IndexName { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 index_id = 2 [(gogoproto.customname) = "IndexID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.IndexID"]; - string name = 3; -} - -enum ConstraintType { - Invalid = 0; - UniqueWithoutIndex = 1; - Check = 2; -} - -message ConstraintName { - option (gogoproto.equal) = true; - uint32 table_id = 1 [(gogoproto.customname) = "TableID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - ConstraintType constraint_type = 2; - uint32 constraint_ordinal = 3; - string name = 4; -} - - -message DefaultPrivilege { - uint32 descriptor_id = 1[(gogoproto.customname) = "DescriptorID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - message DefaultObjectPrivilege{ - repeated UserPrivileges privileges = 1; - } - oneof role { - cockroach.sql.sqlbase.DefaultPrivilegesForRole.ExplicitRole explicit_role = 2; - cockroach.sql.sqlbase.DefaultPrivilegesForRole.ForAllRolesPseudoRole for_all_roles = 3; - } - map privileges_per_object = 4; +message Statement { + string statement = 1; + string redacted_statement = 2; + string statement_tag = 3; } -message DatabaseSchemaEntry { - uint32 database_id = 1 [(gogoproto.customname) = "DatabaseID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; - uint32 schema_id = 2 [(gogoproto.customname) = "SchemaID", (gogoproto.casttype) = "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb.ID"]; +message Authorization { + string user_name = 1; + string app_name = 2; } diff --git a/pkg/sql/schemachanger/scpb/node.go b/pkg/sql/schemachanger/scpb/state.go similarity index 59% rename from pkg/sql/schemachanger/scpb/node.go rename to pkg/sql/schemachanger/scpb/state.go index da3eb4349beb..aa434d2a061d 100644 --- a/pkg/sql/schemachanger/scpb/node.go +++ b/pkg/sql/schemachanger/scpb/state.go @@ -15,35 +15,24 @@ import ( "github.com/cockroachdb/errors" ) -// State represents a current or potential future state of the -// schema change system. Additionally, it tracks any metadata -// for the schema change such as the Statements and Authorization -// information. Nodes may refer to this information for different -// purposes. -type State struct { - Nodes []*Node - Statements []*Statement - Authorization Authorization +// CurrentState is a TargetState decorated with the current status of the +// elements in the target state. +type CurrentState struct { + TargetState + Current []Status } -// Statuses returns a slice of statuses extracted from the Nodes. -func (s *State) Statuses() []Status { - statuses := make([]Status, len(s.Nodes)) - for i := range s.Nodes { - statuses[i] = s.Nodes[i].Status +// DeepCopy returns a deep copy of the receiver. +func (s CurrentState) DeepCopy() CurrentState { + return CurrentState{ + TargetState: *protoutil.Clone(&s.TargetState).(*TargetState), + Current: append(make([]Status, 0, len(s.Current)), s.Current...), } - return statuses } // NumStatus is the number of values which Status may take on. var NumStatus = len(Status_name) -// Node represents a Target with a given status. -type Node struct { - *Target - Status -} - // Element represents a logical component of a catalog entry's schema (e.g., an // index or column in a table). 
type Element interface { @@ -54,7 +43,7 @@ type Element interface { element() } -//go:generate go run element_generator.go --in scpb.proto --out elements_generated.go +//go:generate go run element_generator.go --in elements.proto --out elements_generated.go //go:generate go run element_uml_generator.go --out uml/table.puml // Element returns an Element from its wrapper for serialization. @@ -62,9 +51,9 @@ func (e *ElementProto) Element() Element { return e.GetValue().(Element) } -// NewTarget constructs a new Target. The passed elem must be one of the oneOf +// MakeTarget constructs a new Target. The passed elem must be one of the oneOf // members of Element. If not, this call will panic. -func NewTarget(status Status, elem Element, metadata *TargetMetadata) *Target { +func MakeTarget(status Status, elem Element, metadata *TargetMetadata) Target { t := Target{ TargetStatus: status, } @@ -74,5 +63,10 @@ func NewTarget(status Status, elem Element, metadata *TargetMetadata) *Target { if !t.SetValue(elem) { panic(errors.Errorf("unknown element type %T", elem)) } - return &t + return t } + +// SourceElementID elements ID's for identifying parent elements. +// This ID is dynamically allocated when any parent element is +// created and has no relation to the descriptor ID. +type SourceElementID uint32 diff --git a/pkg/sql/schemachanger/scpb/uml/table.puml b/pkg/sql/schemachanger/scpb/uml/table.puml index 75ba87eaf1f9..279ffbeb534e 100644 --- a/pkg/sql/schemachanger/scpb/uml/table.puml +++ b/pkg/sql/schemachanger/scpb/uml/table.puml @@ -228,6 +228,40 @@ CheckConstraintTypeReference : TableID CheckConstraintTypeReference : ConstraintOrdinal CheckConstraintTypeReference : TypeID +object TableComment + +TableComment : TableID +TableComment : Comment + +object DatabaseComment + +DatabaseComment : DatabaseID +DatabaseComment : Comment + +object SchemaComment + +SchemaComment : SchemaID +SchemaComment : Comment + +object IndexComment + +IndexComment : TableID +IndexComment : IndexID +IndexComment : Comment + +object ColumnComment + +ColumnComment : TableID +ColumnComment : ColumnID +ColumnComment : Comment + +object ConstraintComment + +ConstraintComment : TableID +ConstraintComment : ConstraintName +ConstraintComment : ConstraintType +ConstraintComment : Comment + Table <|-- Column Table <|-- PrimaryIndex Table <|-- SecondaryIndex @@ -281,4 +315,16 @@ Database <|-- DatabaseSchemaEntry Schema <|-- DatabaseSchemaEntry Table <|-- CheckConstraintTypeReference Type <|-- CheckConstraintTypeReference +Table <|-- TableComment +View <|-- TableComment +Sequence <|-- TableComment +Database <|-- DatabaseComment +Schema <|-- SchemaComment +Index <|-- IndexComment +Column <|-- ColumnComment +PrimaryIndex <|-- ConstraintComment +SecondaryIndex <|-- ConstraintComment +ForeignKey <|-- ConstraintComment +UniqueConstraint <|-- ConstraintComment +CheckConstraint <|-- ConstraintComment @enduml diff --git a/pkg/sql/schemachanger/scplan/BUILD.bazel b/pkg/sql/schemachanger/scplan/BUILD.bazel index 607c04f1b1d1..b5a337dc41a6 100644 --- a/pkg/sql/schemachanger/scplan/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/BUILD.bazel @@ -7,13 +7,14 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/jobs/jobspb", - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", - "//pkg/sql/schemachanger/scplan/deprules", - "//pkg/sql/schemachanger/scplan/opgen", - "//pkg/sql/schemachanger/scplan/scopt", - "//pkg/sql/schemachanger/scplan/scstage", + 
"//pkg/sql/schemachanger/scplan/internal/deprules", + "//pkg/sql/schemachanger/scplan/internal/opgen", + "//pkg/sql/schemachanger/scplan/internal/scgraph", + "//pkg/sql/schemachanger/scplan/internal/scgraphviz", + "//pkg/sql/schemachanger/scplan/internal/scopt", + "//pkg/sql/schemachanger/scplan/internal/scstage", "@com_github_cockroachdb_errors//:errors", ], ) @@ -37,11 +38,11 @@ go_test( "//pkg/sql/schemachanger/scbuild", "//pkg/sql/schemachanger/scdeps/sctestutils", "//pkg/sql/schemachanger/scerrors", - "//pkg/sql/schemachanger/scgraph", - "//pkg/sql/schemachanger/scgraphviz", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", - "//pkg/sql/schemachanger/scplan/scstage", + "//pkg/sql/schemachanger/scplan/internal/scgraph", + "//pkg/sql/schemachanger/scplan/internal/scgraphviz", + "//pkg/sql/schemachanger/scplan/internal/scstage", "//pkg/sql/schemachanger/screl", "//pkg/sql/sem/tree", "//pkg/testutils/serverutils", diff --git a/pkg/sql/schemachanger/scplan/deprules/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/deprules/BUILD.bazel similarity index 89% rename from pkg/sql/schemachanger/scplan/deprules/BUILD.bazel rename to pkg/sql/schemachanger/scplan/internal/deprules/BUILD.bazel index b120a0049708..e98aa7a12346 100644 --- a/pkg/sql/schemachanger/scplan/deprules/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/deprules/BUILD.bazel @@ -7,13 +7,13 @@ go_library( "registry.go", "rules.go", ], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/deprules", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/deprules", visibility = ["//visibility:public"], deps = [ "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/rel", - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/schemachanger/scplan/internal/scgraph", "//pkg/sql/schemachanger/screl", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/sql/schemachanger/scplan/deprules/helpers.go b/pkg/sql/schemachanger/scplan/internal/deprules/helpers.go similarity index 100% rename from pkg/sql/schemachanger/scplan/deprules/helpers.go rename to pkg/sql/schemachanger/scplan/internal/deprules/helpers.go diff --git a/pkg/sql/schemachanger/scplan/deprules/registry.go b/pkg/sql/schemachanger/scplan/internal/deprules/registry.go similarity index 81% rename from pkg/sql/schemachanger/scplan/deprules/registry.go rename to pkg/sql/schemachanger/scplan/internal/deprules/registry.go index 99366c66eac1..c5b51eb7e98a 100644 --- a/pkg/sql/schemachanger/scplan/deprules/registry.go +++ b/pkg/sql/schemachanger/scplan/internal/deprules/registry.go @@ -14,8 +14,8 @@ package deprules import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" ) // Apply will add the dependency edges to the graph which ought to exist @@ -23,10 +23,10 @@ import ( func Apply(g *scgraph.Graph) error { for _, dr := range depRules { if err := dr.q.Iterate(g.Database(), func(r rel.Result) error { - from := r.Var(dr.from).(*scpb.Node) - to := r.Var(dr.to).(*scpb.Node) + from := r.Var(dr.from).(*screl.Node) + to := r.Var(dr.to).(*screl.Node) return g.AddDepEdge( - dr.name, dr.kind, from.Target, from.Status, to.Target, to.Status, + dr.name, dr.kind, from.Target, from.CurrentStatus, 
to.Target, to.CurrentStatus, ) }); err != nil { return err diff --git a/pkg/sql/schemachanger/scplan/deprules/rules.go b/pkg/sql/schemachanger/scplan/internal/deprules/rules.go similarity index 98% rename from pkg/sql/schemachanger/scplan/deprules/rules.go rename to pkg/sql/schemachanger/scplan/internal/deprules/rules.go index e8984ee42d36..f71be8cee716 100644 --- a/pkg/sql/schemachanger/scplan/deprules/rules.go +++ b/pkg/sql/schemachanger/scplan/internal/deprules/rules.go @@ -13,8 +13,8 @@ package deprules import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/errors" ) @@ -27,7 +27,7 @@ func joinTargetNode(element, target, node rel.Var, targetStatus, status scpb.Sta return rel.And( screl.JoinTargetNode(element, target, node), target.AttrEq(screl.TargetStatus, targetStatus), - node.AttrEq(screl.Status, status), + node.AttrEq(screl.CurrentStatus, status), ) } @@ -70,7 +70,7 @@ func init() { screl.JoinTargetNode(parent, parentTarget, parentNode), parentTarget.AttrEq(screl.TargetStatus, scpb.Status_ABSENT), - parentNode.AttrEq(screl.Status, scpb.Status_ABSENT), + parentNode.AttrEq(screl.CurrentStatus, scpb.Status_ABSENT), joinTargetNode(other, otherTarget, otherNode, scpb.Status_ABSENT, scpb.Status_ABSENT), ), @@ -131,7 +131,7 @@ func init() { rel.Filter("columnInIndex", column, index)(columnInIndex), targetStatus.Entities(screl.TargetStatus, columnTarget, indexTarget), - status.Entities(screl.Status, columnNode, indexNode), + status.Entities(screl.CurrentStatus, columnNode, indexNode), screl.JoinTargetNode(column, columnTarget, columnNode), screl.JoinTargetNode(index, indexTarget, indexNode), @@ -437,7 +437,7 @@ func init() { screl.JoinTargetNode(index, indexTarget, indexNode), indexTarget.AttrEq(screl.TargetStatus, scpb.Status_ABSENT), - indexNode.AttrEq(screl.Status, scpb.Status_VALIDATED), + indexNode.AttrEq(screl.CurrentStatus, scpb.Status_VALIDATED), joinTargetNode(indexName, indexNameTarget, indexNameNode, scpb.Status_ABSENT, scpb.Status_ABSENT), ), ) diff --git a/pkg/sql/schemachanger/scplan/deprules/rules_test.go b/pkg/sql/schemachanger/scplan/internal/deprules/rules_test.go similarity index 100% rename from pkg/sql/schemachanger/scplan/deprules/rules_test.go rename to pkg/sql/schemachanger/scplan/internal/deprules/rules_test.go diff --git a/pkg/sql/schemachanger/scplan/deprules/testdata/rules b/pkg/sql/schemachanger/scplan/internal/deprules/testdata/rules similarity index 80% rename from pkg/sql/schemachanger/scplan/deprules/testdata/rules rename to pkg/sql/schemachanger/scplan/internal/deprules/testdata/rules index a4e3d19ec3fa..0f0e15bef647 100644 --- a/pkg/sql/schemachanger/scplan/deprules/testdata/rules +++ b/pkg/sql/schemachanger/scplan/internal/deprules/testdata/rules @@ -9,16 +9,16 @@ rules - parentDependsOn(scpb.Element, scpb.Element)($parent, $other) - $parent-target[Type] = '*scpb.Target' - $parent-target[Element] = $parent - - $parent-node[Type] = '*scpb.Node' + - $parent-node[Type] = '*screl.Node' - $parent-node[Target] = $parent-target - $parent-target[TargetStatus] = ABSENT - - $parent-node[Status] = ABSENT + - $parent-node[CurrentStatus] = ABSENT - $other-target[Type] = '*scpb.Target' - 
$other-target[Element] = $other - - $other-node[Type] = '*scpb.Node' + - $other-node[Type] = '*screl.Node' - $other-node[Target] = $other-target - $other-target[TargetStatus] = ABSENT - - $other-node[Status] = ABSENT + - $other-node[CurrentStatus] = ABSENT - name: column depends on indexes from: index-node to: column-node @@ -32,15 +32,15 @@ rules - columnInIndex(*scpb.Column, scpb.Element)($column, $index) - $column-target[TargetStatus] = $target-status - $index-target[TargetStatus] = $target-status - - $column-node[Status] = $status - - $index-node[Status] = $status + - $column-node[CurrentStatus] = $status + - $index-node[CurrentStatus] = $status - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - $column-node[Target] = $column-target - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - name: index existence depends on column existence from: column-node @@ -53,16 +53,16 @@ rules - columnInIndex(*scpb.Column, scpb.Element)($column, $index) - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - $column-node[Target] = $column-target - $column-target[TargetStatus] = PUBLIC - - $column-node[Status] = DELETE_ONLY + - $column-node[CurrentStatus] = DELETE_ONLY - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - $index-target[TargetStatus] = PUBLIC - - $index-node[Status] = DELETE_ONLY + - $index-node[CurrentStatus] = DELETE_ONLY - name: primary index add depends on drop from: drop-idx-node to: add-idx-node @@ -74,16 +74,16 @@ rules - referenceEachOther(*scpb.PrimaryIndex, *scpb.PrimaryIndex)($add-idx, $drop-idx) - $add-idx-target[Type] = '*scpb.Target' - $add-idx-target[Element] = $add-idx - - $add-idx-node[Type] = '*scpb.Node' + - $add-idx-node[Type] = '*screl.Node' - $add-idx-node[Target] = $add-idx-target - $add-idx-target[TargetStatus] = PUBLIC - - $add-idx-node[Status] = PUBLIC + - $add-idx-node[CurrentStatus] = PUBLIC - $drop-idx-target[Type] = '*scpb.Target' - $drop-idx-target[Element] = $drop-idx - - $drop-idx-node[Type] = '*scpb.Node' + - $drop-idx-node[Type] = '*screl.Node' - $drop-idx-node[Target] = $drop-idx-target - $drop-idx-target[TargetStatus] = ABSENT - - $drop-idx-node[Status] = VALIDATED + - $drop-idx-node[CurrentStatus] = VALIDATED - name: partitioning information needs the basic index as created from: add-idx-node to: partitioning-node @@ -96,16 +96,16 @@ rules - $partitioning[IndexID] = $index-id - $add-idx-target[Type] = '*scpb.Target' - $add-idx-target[Element] = $add-idx - - $add-idx-node[Type] = '*scpb.Node' + - $add-idx-node[Type] = '*screl.Node' - $add-idx-node[Target] = $add-idx-target - $add-idx-target[TargetStatus] = PUBLIC - - $add-idx-node[Status] = DELETE_ONLY + - $add-idx-node[CurrentStatus] = DELETE_ONLY - $partitioning-target[Type] = '*scpb.Target' - $partitioning-target[Element] = $partitioning - - $partitioning-node[Type] = '*scpb.Node' + - $partitioning-node[Type] = '*screl.Node' - $partitioning-node[Target] = $partitioning-target - $partitioning-target[TargetStatus] = PUBLIC - - $partitioning-node[Status] = PUBLIC + - $partitioning-node[CurrentStatus] = PUBLIC - name: index needs 
partitioning information to be filled from: add-idx-node to: partitioning-node @@ -118,16 +118,16 @@ rules - $partitioning[IndexID] = $id - $add-idx-target[Type] = '*scpb.Target' - $add-idx-target[Element] = $add-idx - - $add-idx-node[Type] = '*scpb.Node' + - $add-idx-node[Type] = '*screl.Node' - $add-idx-node[Target] = $add-idx-target - $add-idx-target[TargetStatus] = PUBLIC - - $add-idx-node[Status] = DELETE_AND_WRITE_ONLY + - $add-idx-node[CurrentStatus] = DELETE_AND_WRITE_ONLY - $partitioning-target[Type] = '*scpb.Target' - $partitioning-target[Element] = $partitioning - - $partitioning-node[Type] = '*scpb.Node' + - $partitioning-node[Type] = '*screl.Node' - $partitioning-node[Target] = $partitioning-target - $partitioning-target[TargetStatus] = PUBLIC - - $partitioning-node[Status] = PUBLIC + - $partitioning-node[CurrentStatus] = PUBLIC - name: dependency needs relation/type as non-synthetically dropped from: relation-node to: dep-node @@ -138,16 +138,16 @@ rules - $dep[DescID] = $id - $relation-target[Type] = '*scpb.Target' - $relation-target[Element] = $relation - - $relation-node[Type] = '*scpb.Node' + - $relation-node[Type] = '*screl.Node' - $relation-node[Target] = $relation-target - $relation-target[TargetStatus] = ABSENT - - $relation-node[Status] = DROPPED + - $relation-node[CurrentStatus] = DROPPED - $dep-target[Type] = '*scpb.Target' - $dep-target[Element] = $dep - - $dep-node[Type] = '*scpb.Node' + - $dep-node[Type] = '*screl.Node' - $dep-node[Target] = $dep-target - $dep-target[TargetStatus] = ABSENT - - $dep-node[Status] = ABSENT + - $dep-node[CurrentStatus] = ABSENT - name: dependency needs relation/type as non-synthetically dropped from: relation-node to: dep-node @@ -158,16 +158,16 @@ rules - $dep[ReferencedDescID] = $id - $relation-target[Type] = '*scpb.Target' - $relation-target[Element] = $relation - - $relation-node[Type] = '*scpb.Node' + - $relation-node[Type] = '*screl.Node' - $relation-node[Target] = $relation-target - $relation-target[TargetStatus] = ABSENT - - $relation-node[Status] = DROPPED + - $relation-node[CurrentStatus] = DROPPED - $dep-target[Type] = '*scpb.Target' - $dep-target[Element] = $dep - - $dep-node[Type] = '*scpb.Node' + - $dep-node[Type] = '*screl.Node' - $dep-node[Target] = $dep-target - $dep-target[TargetStatus] = ABSENT - - $dep-node[Status] = ABSENT + - $dep-node[CurrentStatus] = ABSENT - name: namespace needs descriptor to be dropped from: dep-node to: namespace-node @@ -178,16 +178,16 @@ rules - $namespace[DescID] = $desc-id - $namespace-target[Type] = '*scpb.Target' - $namespace-target[Element] = $namespace - - $namespace-node[Type] = '*scpb.Node' + - $namespace-node[Type] = '*screl.Node' - $namespace-node[Target] = $namespace-target - $namespace-target[TargetStatus] = ABSENT - - $namespace-node[Status] = ABSENT + - $namespace-node[CurrentStatus] = ABSENT - $dep-target[Type] = '*scpb.Target' - $dep-target[Element] = $dep - - $dep-node[Type] = '*scpb.Node' + - $dep-node[Type] = '*screl.Node' - $dep-node[Target] = $dep-target - $dep-target[TargetStatus] = ABSENT - - $dep-node[Status] = DROPPED + - $dep-node[CurrentStatus] = DROPPED - name: descriptor can only be cleaned up once the name is drained from: namespace-node to: dep-node @@ -198,16 +198,16 @@ rules - $namespace[DescID] = $desc-id - $namespace-target[Type] = '*scpb.Target' - $namespace-target[Element] = $namespace - - $namespace-node[Type] = '*scpb.Node' + - $namespace-node[Type] = '*screl.Node' - $namespace-node[Target] = $namespace-target - $namespace-target[TargetStatus] = 
ABSENT - - $namespace-node[Status] = ABSENT + - $namespace-node[CurrentStatus] = ABSENT - $dep-target[Type] = '*scpb.Target' - $dep-target[Element] = $dep - - $dep-node[Type] = '*scpb.Node' + - $dep-node[Type] = '*screl.Node' - $dep-node[Target] = $dep-target - $dep-target[TargetStatus] = ABSENT - - $dep-node[Status] = ABSENT + - $dep-node[CurrentStatus] = ABSENT - name: column named after column existence from: column-node to: column-name-node @@ -220,16 +220,16 @@ rules - $column-name[ColumnID] = $column-id - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - $column-node[Target] = $column-target - $column-target[TargetStatus] = PUBLIC - - $column-node[Status] = DELETE_ONLY + - $column-node[CurrentStatus] = DELETE_ONLY - $column-name-target[Type] = '*scpb.Target' - $column-name-target[Element] = $column-name - - $column-name-node[Type] = '*scpb.Node' + - $column-name-node[Type] = '*screl.Node' - $column-name-node[Target] = $column-name-target - $column-name-target[TargetStatus] = PUBLIC - - $column-name-node[Status] = PUBLIC + - $column-name-node[CurrentStatus] = PUBLIC - name: column named right before column becomes public from: column-name-node to: column-node @@ -242,16 +242,16 @@ rules - $column-name[ColumnID] = $column-id - $column-name-target[Type] = '*scpb.Target' - $column-name-target[Element] = $column-name - - $column-name-node[Type] = '*scpb.Node' + - $column-name-node[Type] = '*screl.Node' - $column-name-node[Target] = $column-name-target - $column-name-target[TargetStatus] = PUBLIC - - $column-name-node[Status] = PUBLIC + - $column-name-node[CurrentStatus] = PUBLIC - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - $column-node[Target] = $column-target - $column-target[TargetStatus] = PUBLIC - - $column-node[Status] = PUBLIC + - $column-node[CurrentStatus] = PUBLIC - name: column unnamed after column no longer public from: column-node to: column-name-node @@ -264,16 +264,16 @@ rules - $column-name[ColumnID] = $column-id - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - $column-node[Target] = $column-target - $column-target[TargetStatus] = ABSENT - - $column-node[Status] = DELETE_AND_WRITE_ONLY + - $column-node[CurrentStatus] = DELETE_AND_WRITE_ONLY - $column-name-target[Type] = '*scpb.Target' - $column-name-target[Element] = $column-name - - $column-name-node[Type] = '*scpb.Node' + - $column-name-node[Type] = '*screl.Node' - $column-name-node[Target] = $column-name-target - $column-name-target[TargetStatus] = ABSENT - - $column-name-node[Status] = ABSENT + - $column-name-node[CurrentStatus] = ABSENT - name: column unnamed before column no longer exists from: column-name-node to: column-node @@ -286,16 +286,16 @@ rules - $column-name[ColumnID] = $column-id - $column-name-target[Type] = '*scpb.Target' - $column-name-target[Element] = $column-name - - $column-name-node[Type] = '*scpb.Node' + - $column-name-node[Type] = '*screl.Node' - $column-name-node[Target] = $column-name-target - $column-name-target[TargetStatus] = ABSENT - - $column-name-node[Status] = ABSENT + - $column-name-node[CurrentStatus] = ABSENT - $column-target[Type] = '*scpb.Target' - $column-target[Element] = $column - - $column-node[Type] = '*scpb.Node' + - $column-node[Type] = '*screl.Node' - 
$column-node[Target] = $column-target - $column-target[TargetStatus] = ABSENT - - $column-node[Status] = ABSENT + - $column-node[CurrentStatus] = ABSENT - name: index named after index existence from: index-node to: index-name-node @@ -308,16 +308,16 @@ rules - $index-name[IndexID] = $index-id - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - $index-target[TargetStatus] = PUBLIC - - $index-node[Status] = DELETE_ONLY + - $index-node[CurrentStatus] = DELETE_ONLY - $index-name-target[Type] = '*scpb.Target' - $index-name-target[Element] = $index-name - - $index-name-node[Type] = '*scpb.Node' + - $index-name-node[Type] = '*screl.Node' - $index-name-node[Target] = $index-name-target - $index-name-target[TargetStatus] = PUBLIC - - $index-name-node[Status] = PUBLIC + - $index-name-node[CurrentStatus] = PUBLIC - name: index named right before index becomes public from: index-name-node to: index-node @@ -330,16 +330,16 @@ rules - $index-name[IndexID] = $index-id - $index-name-target[Type] = '*scpb.Target' - $index-name-target[Element] = $index-name - - $index-name-node[Type] = '*scpb.Node' + - $index-name-node[Type] = '*screl.Node' - $index-name-node[Target] = $index-name-target - $index-name-target[TargetStatus] = PUBLIC - - $index-name-node[Status] = PUBLIC + - $index-name-node[CurrentStatus] = PUBLIC - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - $index-target[TargetStatus] = PUBLIC - - $index-node[Status] = PUBLIC + - $index-node[CurrentStatus] = PUBLIC - name: index unnamed after index no longer public from: index-node to: index-name-node @@ -352,16 +352,16 @@ rules - $index-name[IndexID] = $index-id - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - $index-target[TargetStatus] = ABSENT - - $index-node[Status] = VALIDATED + - $index-node[CurrentStatus] = VALIDATED - $index-name-target[Type] = '*scpb.Target' - $index-name-target[Element] = $index-name - - $index-name-node[Type] = '*scpb.Node' + - $index-name-node[Type] = '*screl.Node' - $index-name-node[Target] = $index-name-target - $index-name-target[TargetStatus] = ABSENT - - $index-name-node[Status] = ABSENT + - $index-name-node[CurrentStatus] = ABSENT - name: index unnamed before index no longer exists from: index-name-node to: index-node @@ -374,16 +374,16 @@ rules - $index-name[IndexID] = $index-id - $index-name-target[Type] = '*scpb.Target' - $index-name-target[Element] = $index-name - - $index-name-node[Type] = '*scpb.Node' + - $index-name-node[Type] = '*screl.Node' - $index-name-node[Target] = $index-name-target - $index-name-target[TargetStatus] = ABSENT - - $index-name-node[Status] = ABSENT + - $index-name-node[CurrentStatus] = ABSENT - $index-target[Type] = '*scpb.Target' - $index-target[Element] = $index - - $index-node[Type] = '*scpb.Node' + - $index-node[Type] = '*screl.Node' - $index-node[Target] = $index-target - $index-target[TargetStatus] = ABSENT - - $index-node[Status] = ABSENT + - $index-node[CurrentStatus] = ABSENT - name: type ref drop is no-op if ref is being added from: type-ref-drop-node to: type-ref-drop-node @@ -396,16 +396,16 @@ rules - $type-ref-add[DescID] = $table-id - $type-ref-drop-target[Type] = 
'*scpb.Target' - $type-ref-drop-target[Element] = $type-ref-drop - - $type-ref-drop-node[Type] = '*scpb.Node' + - $type-ref-drop-node[Type] = '*screl.Node' - $type-ref-drop-node[Target] = $type-ref-drop-target - $type-ref-drop-target[TargetStatus] = ABSENT - - $type-ref-drop-node[Status] = ABSENT + - $type-ref-drop-node[CurrentStatus] = ABSENT - $type-ref-add-target[Type] = '*scpb.Target' - $type-ref-add-target[Element] = $type-ref-add - - $type-ref-add-node[Type] = '*scpb.Node' + - $type-ref-add-node[Type] = '*screl.Node' - $type-ref-add-node[Target] = $type-ref-add-target - $type-ref-add-target[TargetStatus] = PUBLIC - - $type-ref-add-node[Status] = PUBLIC + - $type-ref-add-node[CurrentStatus] = PUBLIC - name: table deps removal happens after table marked as dropped from: table-drop-node to: dep-drop-node @@ -416,16 +416,16 @@ rules - $dep-drop[DescID] = $table-id - $dep-drop-target[Type] = '*scpb.Target' - $dep-drop-target[Element] = $dep-drop - - $dep-drop-node[Type] = '*scpb.Node' + - $dep-drop-node[Type] = '*screl.Node' - $dep-drop-node[Target] = $dep-drop-target - $dep-drop-target[TargetStatus] = ABSENT - - $dep-drop-node[Status] = ABSENT + - $dep-drop-node[CurrentStatus] = ABSENT - $table-drop-target[Type] = '*scpb.Target' - $table-drop-target[Element] = $table-drop - - $table-drop-node[Type] = '*scpb.Node' + - $table-drop-node[Type] = '*screl.Node' - $table-drop-node[Target] = $table-drop-target - $table-drop-target[TargetStatus] = ABSENT - - $table-drop-node[Status] = DROPPED + - $table-drop-node[CurrentStatus] = DROPPED - name: schema can be dropped after schema entry inside the database from: schema-entry-node to: schema-node @@ -436,16 +436,16 @@ rules - $schema-entry[ReferencedDescID] = $schema-id - $schema-target[Type] = '*scpb.Target' - $schema-target[Element] = $schema - - $schema-node[Type] = '*scpb.Node' + - $schema-node[Type] = '*screl.Node' - $schema-node[Target] = $schema-target - $schema-target[TargetStatus] = ABSENT - - $schema-node[Status] = ABSENT + - $schema-node[CurrentStatus] = ABSENT - $schema-entry-target[Type] = '*scpb.Target' - $schema-entry-target[Element] = $schema-entry - - $schema-entry-node[Type] = '*scpb.Node' + - $schema-entry-node[Type] = '*screl.Node' - $schema-entry-node[Target] = $schema-entry-target - $schema-entry-target[TargetStatus] = ABSENT - - $schema-entry-node[Status] = ABSENT + - $schema-entry-node[CurrentStatus] = ABSENT - name: schema entry can be dropped after the database has exited synth drop from: database-node to: schema-entry-node @@ -456,13 +456,13 @@ rules - $schema-entry[DescID] = $schema-id - $database-target[Type] = '*scpb.Target' - $database-target[Element] = $database - - $database-node[Type] = '*scpb.Node' + - $database-node[Type] = '*screl.Node' - $database-node[Target] = $database-target - $database-target[TargetStatus] = ABSENT - - $database-node[Status] = DROPPED + - $database-node[CurrentStatus] = DROPPED - $schema-entry-target[Type] = '*scpb.Target' - $schema-entry-target[Element] = $schema-entry - - $schema-entry-node[Type] = '*scpb.Node' + - $schema-entry-node[Type] = '*screl.Node' - $schema-entry-node[Target] = $schema-entry-target - $schema-entry-target[TargetStatus] = ABSENT - - $schema-entry-node[Status] = ABSENT + - $schema-entry-node[CurrentStatus] = ABSENT diff --git a/pkg/sql/schemachanger/scplan/opgen/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/BUILD.bazel rename to 
pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel index 01bac976e56f..2754d6c0666b 100644 --- a/pkg/sql/schemachanger/scplan/opgen/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/opgen/BUILD.bazel @@ -8,15 +8,19 @@ go_library( "opgen_check_constraint.go", "opgen_check_constraint_type_reference.go", "opgen_column.go", + "opgen_column_comment.go", "opgen_column_name.go", "opgen_column_type_reference.go", "opgen_computed_expr_type_reference.go", + "opgen_constraint_comment.go", "opgen_constraint_name.go", "opgen_database.go", + "opgen_database_comment.go", "opgen_db_schema_entry.go", "opgen_default_expr_type_reference.go", "opgen_default_expression.go", "opgen_in_foreign_key.go", + "opgen_index_comment.go", "opgen_index_name.go", "opgen_locality.go", "opgen_namespace.go", @@ -27,11 +31,13 @@ go_library( "opgen_primary_index.go", "opgen_relation_depended_on_by.go", "opgen_schema.go", + "opgen_schema_comment.go", "opgen_secondary_index.go", "opgen_sequence.go", "opgen_sequence_dependency.go", "opgen_sequence_owned_by.go", "opgen_table.go", + "opgen_table_comment.go", "opgen_type.go", "opgen_unique_constraint.go", "opgen_user_privileges.go", @@ -41,16 +47,17 @@ go_library( "specs.go", "target.go", ], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/opgen", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/opgen", visibility = ["//visibility:public"], deps = [ "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", "//pkg/sql/schemachanger/rel", - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/schemachanger/scplan/internal/scgraph", "//pkg/sql/schemachanger/screl", + "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/sql/schemachanger/scplan/opgen/op_funcs.go b/pkg/sql/schemachanger/scplan/internal/opgen/op_funcs.go similarity index 66% rename from pkg/sql/schemachanger/scplan/opgen/op_funcs.go rename to pkg/sql/schemachanger/scplan/internal/opgen/op_funcs.go index f1f3a32d340b..d7161ea22d23 100644 --- a/pkg/sql/schemachanger/scplan/opgen/op_funcs.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/op_funcs.go @@ -15,12 +15,30 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" ) +func newLogEventOp(e scpb.Element, ts scpb.TargetState) *scop.LogEvent { + for _, t := range ts.Targets { + if t.Element() == e { + return &scop.LogEvent{ + TargetMetadata: *protoutil.Clone(&t.Metadata).(*scpb.TargetMetadata), + Authorization: *protoutil.Clone(&ts.Authorization).(*scpb.Authorization), + Statement: ts.Statements[t.Metadata.StatementID].RedactedStatement, + StatementTag: ts.Statements[t.Metadata.StatementID].StatementTag, + Element: *protoutil.Clone(&t.ElementProto).(*scpb.ElementProto), + TargetStatus: t.TargetStatus, + } + } + } + panic(errors.AssertionFailedf("could not find element %s in target state", screl.ElementString(e))) +} + // opsFunc are a fully-compiled and checked set of functions to emit operations // given an element value. 
-type opsFunc func(element scpb.Element, metadata *scpb.ElementMetadata) []scop.Op +type opsFunc func(element scpb.Element, targetState scpb.TargetState) []scop.Op func makeOpsFunc(el scpb.Element, fns []interface{}) (opsFunc, error) { var funcValues []reflect.Value @@ -30,10 +48,10 @@ func makeOpsFunc(el scpb.Element, fns []interface{}) (opsFunc, error) { } funcValues = append(funcValues, reflect.ValueOf(fn)) } - return func(element scpb.Element, metadata *scpb.ElementMetadata) []scop.Op { + return func(element scpb.Element, targetState scpb.TargetState) []scop.Op { ret := make([]scop.Op, 0, len(funcValues)) in := []reflect.Value{reflect.ValueOf(element)} - inWithMeta := []reflect.Value{reflect.ValueOf(element), reflect.ValueOf(metadata)} + inWithMeta := []reflect.Value{reflect.ValueOf(element), reflect.ValueOf(targetState)} for _, fn := range funcValues { var out []reflect.Value if fn.Type().NumIn() == 1 { @@ -60,7 +78,7 @@ func checkOpFunc(el scpb.Element, fn interface{}) error { elType := reflect.TypeOf(el) if !(fnT.NumIn() == 1 && fnT.In(0) == elType) && !(fnT.NumIn() == 2 && fnT.In(0) == elType && - fnT.In(1) == reflect.TypeOf((*scpb.ElementMetadata)(nil))) { + fnT.In(1) == reflect.TypeOf(scpb.TargetState{})) { return errors.Errorf( "expected %v to be a func with one argument of type %s", fnT, elType, ) diff --git a/pkg/sql/schemachanger/scplan/opgen/op_gen.go b/pkg/sql/schemachanger/scplan/internal/opgen/op_gen.go similarity index 75% rename from pkg/sql/schemachanger/scplan/opgen/op_gen.go rename to pkg/sql/schemachanger/scplan/internal/opgen/op_gen.go index 0c399883e2c5..c34dad7d202f 100644 --- a/pkg/sql/schemachanger/scplan/opgen/op_gen.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/op_gen.go @@ -11,9 +11,10 @@ package opgen import ( - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" ) type registry struct { @@ -24,12 +25,12 @@ var opRegistry = ®istry{} // BuildGraph constructs a graph with operation edges populated from an initial // state. -func BuildGraph(initial scpb.State) (*scgraph.Graph, error) { - return opRegistry.buildGraph(initial) +func BuildGraph(cs scpb.CurrentState) (*scgraph.Graph, error) { + return opRegistry.buildGraph(cs) } -func (r *registry) buildGraph(initial scpb.State) (*scgraph.Graph, error) { - g, err := scgraph.New(initial) +func (r *registry) buildGraph(cs scpb.CurrentState) (*scgraph.Graph, error) { + g, err := scgraph.New(cs) if err != nil { return nil, err } @@ -38,13 +39,13 @@ func (r *registry) buildGraph(initial scpb.State) (*scgraph.Graph, error) { // to not mutate the database in place. 
type toAdd struct { transition - n *scpb.Node + n *screl.Node } var edgesToAdd []toAdd for _, t := range r.targets { edgesToAdd = edgesToAdd[:0] - if err := t.iterateFunc(g.Database(), func(n *scpb.Node) error { - status := n.Status + if err := t.iterateFunc(g.Database(), func(n *screl.Node) error { + status := n.CurrentStatus for _, op := range t.transitions { if op.from == status { edgesToAdd = append(edgesToAdd, toAdd{ @@ -59,10 +60,9 @@ func (r *registry) buildGraph(initial scpb.State) (*scgraph.Graph, error) { return nil, err } for _, op := range edgesToAdd { - metadata := g.GetMetadataFromTarget(op.n.Target) var ops []scop.Op if op.ops != nil { - ops = op.ops(op.n.Element(), &metadata) + ops = op.ops(op.n.Element(), cs.TargetState) } if err := g.AddOpEdges( op.n.Target, op.from, op.to, op.revertible, op.minPhase, ops..., diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_check_constraint.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_check_constraint.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_check_constraint.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_check_constraint.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_check_constraint_type_reference.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_check_constraint_type_reference.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_check_constraint_type_reference.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_check_constraint_type_reference.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_column.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column.go similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/opgen_column.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_column.go index 64f1cbf08490..9ced666335ec 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_column.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column.go @@ -42,13 +42,8 @@ func init() { Virtual: this.Virtual, } }), - emit(func(this *scpb.Column, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.TableID, - Element: &scpb.ElementProto{Column: this}, - TargetStatus: scpb.Status_PUBLIC, - } + emit(func(this *scpb.Column, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), ), to(scpb.Status_DELETE_AND_WRITE_ONLY, @@ -78,13 +73,8 @@ func init() { ColumnID: this.ColumnID, } }), - emit(func(this *scpb.Column, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.TableID, - Element: &scpb.ElementProto{Column: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Column, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), ), to(scpb.Status_DELETE_ONLY, diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_comment.go new file mode 100644 index 000000000000..c0b5f458745b --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_comment.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.ColumnComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.ColumnComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.ColumnComment) scop.Op { + return &scop.RemoveColumnComment{ + TableID: this.TableID, + ColumnID: this.ColumnID, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_column_name.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_column_name.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_name.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_column_type_reference.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_type_reference.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_column_type_reference.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_column_type_reference.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_computed_expr_type_reference.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_computed_expr_type_reference.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_computed_expr_type_reference.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_computed_expr_type_reference.go diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_comment.go new file mode 100644 index 000000000000..9a4ebcf5dd4d --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_comment.go @@ -0,0 +1,41 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.ConstraintComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.ConstraintComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.ConstraintComment) scop.Op { + return &scop.RemoveConstraintComment{ + TableID: this.TableID, + ConstraintType: this.ConstraintType, + ConstraintName: this.ConstraintName, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_constraint_name.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_name.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_constraint_name.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_constraint_name.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_database.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_database.go similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/opgen_database.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_database.go index 4f97bcf77744..e23703f84e08 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_database.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_database.go @@ -50,13 +50,8 @@ func init() { TableID: this.DatabaseID, } }), - emit(func(this *scpb.Database, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.DatabaseID, - Element: &scpb.ElementProto{Database: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Database, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.Database) scop.Op { return &scop.CreateGcJobForDatabase{ diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_database_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_database_comment.go new file mode 100644 index 000000000000..585e095aab86 --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_database_comment.go @@ -0,0 +1,39 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.DatabaseComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.DatabaseComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.DatabaseComment) scop.Op { + return &scop.RemoveDatabaseComment{ + DatabaseID: this.DatabaseID, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_db_schema_entry.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_db_schema_entry.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_db_schema_entry.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_db_schema_entry.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_default_expr_type_reference.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_default_expr_type_reference.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_default_expr_type_reference.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_default_expr_type_reference.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_default_expression.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_default_expression.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_default_expression.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_default_expression.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_in_foreign_key.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_in_foreign_key.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_in_foreign_key.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_in_foreign_key.go diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_comment.go new file mode 100644 index 000000000000..5afa7af97baf --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_comment.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.IndexComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.IndexComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.IndexComment) scop.Op { + return &scop.RemoveIndexComment{ + TableID: this.TableID, + IndexID: this.IndexID, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_index_name.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_name.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_index_name.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_index_name.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_locality.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_locality.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_locality.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_locality.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_namespace.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_namespace.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_namespace.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_namespace.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_on_update_expr_type_reference.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_on_update_expr_type_reference.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_on_update_expr_type_reference.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_on_update_expr_type_reference.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_out_foreign_key.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_out_foreign_key.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_out_foreign_key.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_out_foreign_key.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_owner.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_owner.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_owner.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_owner.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_partitioning.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_partitioning.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_partitioning.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_partitioning.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_primary_index.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_primary_index.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_primary_index.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_primary_index.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_relation_depended_on_by.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_relation_depended_on_by.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_relation_depended_on_by.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_relation_depended_on_by.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_schema.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema.go similarity index 85% 
rename from pkg/sql/schemachanger/scplan/opgen/opgen_schema.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema.go index 064daa199438..54aef960ba28 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_schema.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema.go @@ -49,13 +49,8 @@ func init() { TableID: this.SchemaID, } }), - emit(func(this *scpb.Schema, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.SchemaID, - Element: &scpb.ElementProto{Schema: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Schema, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.Schema) scop.Op { return &scop.DeleteDescriptor{ diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema_comment.go new file mode 100644 index 000000000000..c6cf46d871a3 --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_schema_comment.go @@ -0,0 +1,39 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.SchemaComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.SchemaComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.SchemaComment) scop.Op { + return &scop.RemoveSchemaComment{ + SchemaID: this.SchemaID, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_secondary_index.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_secondary_index.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_secondary_index.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_secondary_index.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_sequence.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence.go similarity index 84% rename from pkg/sql/schemachanger/scplan/opgen/opgen_sequence.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence.go index 368e907db5a3..308f6b750b39 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_sequence.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence.go @@ -47,13 +47,8 @@ func init() { ), to(scpb.Status_ABSENT, minPhase(scop.PostCommitPhase), - emit(func(this *scpb.Sequence, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.SequenceID, - Element: &scpb.ElementProto{Sequence: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Sequence, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.Sequence) scop.Op { return &scop.CreateGcJobForTable{ diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_sequence_dependency.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence_dependency.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_sequence_dependency.go rename to 
pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence_dependency.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_sequence_owned_by.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence_owned_by.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_sequence_owned_by.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_sequence_owned_by.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_table.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_table.go similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/opgen_table.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_table.go index 7560616a9432..6dbdc9c631dc 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_table.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_table.go @@ -47,13 +47,8 @@ func init() { ), to(scpb.Status_ABSENT, minPhase(scop.PostCommitPhase), - emit(func(this *scpb.Table, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.TableID, - Element: &scpb.ElementProto{Table: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Table, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.Table) scop.Op { return &scop.CreateGcJobForTable{ diff --git a/pkg/sql/schemachanger/scplan/internal/opgen/opgen_table_comment.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_table_comment.go new file mode 100644 index 000000000000..62533d9f5701 --- /dev/null +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_table_comment.go @@ -0,0 +1,39 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package opgen + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" +) + +func init() { + opRegistry.register((*scpb.TableComment)(nil), + toPublic( + scpb.Status_ABSENT, + to(scpb.Status_PUBLIC, + emit(func(this *scpb.TableComment) scop.Op { + return notImplemented(this) + }), + ), + ), + toAbsent( + scpb.Status_PUBLIC, + to(scpb.Status_ABSENT, + emit(func(this *scpb.TableComment) scop.Op { + return &scop.RemoveTableComment{ + TableID: this.TableID, + } + }), + ), + ), + ) +} diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_type.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_type.go similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/opgen_type.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_type.go index 7beb609d6539..9b457f63edbf 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_type.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_type.go @@ -47,13 +47,8 @@ func init() { ), to(scpb.Status_ABSENT, minPhase(scop.PostCommitPhase), - emit(func(this *scpb.Type, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.TypeID, - Element: &scpb.ElementProto{Type: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.Type, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.Type) scop.Op { return &scop.DeleteDescriptor{ diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_unique_constraint.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_unique_constraint.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_unique_constraint.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_unique_constraint.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_user_privileges.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_user_privileges.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_user_privileges.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_user_privileges.go diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_view.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_view.go similarity index 85% rename from pkg/sql/schemachanger/scplan/opgen/opgen_view.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_view.go index bea672d2abeb..02a5e17ead81 100644 --- a/pkg/sql/schemachanger/scplan/opgen/opgen_view.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_view.go @@ -47,13 +47,8 @@ func init() { ), to(scpb.Status_ABSENT, minPhase(scop.PostCommitPhase), - emit(func(this *scpb.View, md *scpb.ElementMetadata) scop.Op { - return &scop.LogEvent{ - Metadata: *md, - DescID: this.TableID, - Element: &scpb.ElementProto{View: this}, - TargetStatus: scpb.Status_ABSENT, - } + emit(func(this *scpb.View, ts scpb.TargetState) scop.Op { + return newLogEventOp(this, ts) }), emit(func(this *scpb.View) scop.Op { return &scop.CreateGcJobForTable{ diff --git a/pkg/sql/schemachanger/scplan/opgen/opgen_view_depends_on_type.go b/pkg/sql/schemachanger/scplan/internal/opgen/opgen_view_depends_on_type.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/opgen_view_depends_on_type.go rename to pkg/sql/schemachanger/scplan/internal/opgen/opgen_view_depends_on_type.go diff --git a/pkg/sql/schemachanger/scplan/opgen/register.go b/pkg/sql/schemachanger/scplan/internal/opgen/register.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/register.go 
rename to pkg/sql/schemachanger/scplan/internal/opgen/register.go diff --git a/pkg/sql/schemachanger/scplan/opgen/register_test.go b/pkg/sql/schemachanger/scplan/internal/opgen/register_test.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/register_test.go rename to pkg/sql/schemachanger/scplan/internal/opgen/register_test.go diff --git a/pkg/sql/schemachanger/scplan/opgen/specs.go b/pkg/sql/schemachanger/scplan/internal/opgen/specs.go similarity index 100% rename from pkg/sql/schemachanger/scplan/opgen/specs.go rename to pkg/sql/schemachanger/scplan/internal/opgen/specs.go diff --git a/pkg/sql/schemachanger/scplan/opgen/target.go b/pkg/sql/schemachanger/scplan/internal/opgen/target.go similarity index 96% rename from pkg/sql/schemachanger/scplan/opgen/target.go rename to pkg/sql/schemachanger/scplan/internal/opgen/target.go index faeb96bc3260..633ae3dfd6bd 100644 --- a/pkg/sql/schemachanger/scplan/opgen/target.go +++ b/pkg/sql/schemachanger/scplan/internal/opgen/target.go @@ -23,7 +23,7 @@ type target struct { e scpb.Element status scpb.Status transitions []transition - iterateFunc func(*rel.Database, func(*scpb.Node) error) error + iterateFunc func(*rel.Database, func(*screl.Node) error) error } // transition represents a transition from one status to the next towards a @@ -59,9 +59,9 @@ func makeTarget(e scpb.Element, spec targetSpec) (t target, err error) { if err != nil { return t, errors.Wrap(err, "failed to construct query") } - t.iterateFunc = func(database *rel.Database, f func(*scpb.Node) error) error { + t.iterateFunc = func(database *rel.Database, f func(*screl.Node) error) error { return q.Iterate(database, func(r rel.Result) error { - return f(r.Var(node).(*scpb.Node)) + return f(r.Var(node).(*screl.Node)) }) } diff --git a/pkg/sql/schemachanger/scgraph/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/scgraph/BUILD.bazel similarity index 94% rename from pkg/sql/schemachanger/scgraph/BUILD.bazel rename to pkg/sql/schemachanger/scplan/internal/scgraph/BUILD.bazel index 4d394e9c60a9..7542d03c709a 100644 --- a/pkg/sql/schemachanger/scgraph/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/BUILD.bazel @@ -10,7 +10,7 @@ go_library( "iteration.go", ":gen-depedgekind-stringer", # keep ], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph", visibility = ["//visibility:public"], deps = [ "//pkg/sql/schemachanger/rel", @@ -35,6 +35,7 @@ go_test( "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/schemachanger/screl", "//pkg/util/iterutil", "//pkg/util/leaktest", "@com_github_stretchr_testify//require", diff --git a/pkg/sql/schemachanger/scgraph/dep_edge_tree.go b/pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree.go similarity index 86% rename from pkg/sql/schemachanger/scgraph/dep_edge_tree.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree.go index dcc3354574d5..f63f1214ee4c 100644 --- a/pkg/sql/schemachanger/scgraph/dep_edge_tree.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree.go @@ -11,7 +11,7 @@ package scgraph import ( - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/google/btree" ) @@ -22,7 +22,7 @@ type depEdgeTree struct { cmp nodeCmpFn } -type nodeCmpFn func(a, b 
*scpb.Node) (less, eq bool) +type nodeCmpFn func(a, b *screl.Node) (less, eq bool) func newDepEdgeTree(order edgeTreeOrder, cmp nodeCmpFn) *depEdgeTree { const degree = 8 // arbitrary @@ -37,14 +37,14 @@ func newDepEdgeTree(order edgeTreeOrder, cmp nodeCmpFn) *depEdgeTree { // either based on from/to node indexes. type edgeTreeOrder bool -func (o edgeTreeOrder) first(e Edge) *scpb.Node { +func (o edgeTreeOrder) first(e Edge) *screl.Node { if o == fromTo { return e.From() } return e.To() } -func (o edgeTreeOrder) second(e Edge) *scpb.Node { +func (o edgeTreeOrder) second(e Edge) *screl.Node { if o == toFrom { return e.From() } @@ -70,7 +70,7 @@ func (et *depEdgeTree) insert(e *DepEdge) { }) } -func (et *depEdgeTree) iterateSourceNode(n *scpb.Node, it DepEdgeIterator) (err error) { +func (et *depEdgeTree) iterateSourceNode(n *screl.Node, it DepEdgeIterator) (err error) { e := &edgeTreeEntry{t: et, edge: &DepEdge{}} if et.order == fromTo { e.edge.from = n diff --git a/pkg/sql/schemachanger/scgraph/dep_edge_tree_test.go b/pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree_test.go similarity index 80% rename from pkg/sql/schemachanger/scgraph/dep_edge_tree_test.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree_test.go index ac9621146f71..555dec34a324 100644 --- a/pkg/sql/schemachanger/scgraph/dep_edge_tree_test.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/dep_edge_tree_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/stretchr/testify/require" @@ -68,27 +69,27 @@ func TestDepEdgeTree(t *testing.T) { // testCaseState is used for each queryCase in a testCase. type testCaseState struct { tree *depEdgeTree - nodes []*scpb.Node // nodes with lower indexes sort lower - nodesToID map[*scpb.Node]nodeID + nodes []*screl.Node // nodes with lower indexes sort lower + nodesToID map[*screl.Node]nodeID } makeTestCaseState := func(tc testCase) testCaseState { tcs := testCaseState{ - nodesToID: make(map[*scpb.Node]nodeID), + nodesToID: make(map[*screl.Node]nodeID), } target := scpb.Target{} - getNode := func(i nodeID) *scpb.Node { + getNode := func(i nodeID) *screl.Node { if i > nodeID(len(tcs.nodes)-1) { for j := nodeID(len(tcs.nodes)); j <= i; j++ { - tcs.nodes = append(tcs.nodes, &scpb.Node{ - Target: &target, - Status: scpb.Status(j), + tcs.nodes = append(tcs.nodes, &screl.Node{ + Target: &target, + CurrentStatus: scpb.Status(j), }) tcs.nodesToID[tcs.nodes[j]] = j } } return tcs.nodes[i] } - tcs.tree = newDepEdgeTree(tc.order, func(a, b *scpb.Node) (less, eq bool) { + tcs.tree = newDepEdgeTree(tc.order, func(a, b *screl.Node) (less, eq bool) { ai, bi := tcs.nodesToID[a], tcs.nodesToID[b] return ai < bi, ai == bi }) @@ -131,16 +132,20 @@ func TestDepEdgeTree(t *testing.T) { // TestGraphCompareNodes ensures the semantics of (*Graph).compareNodes is sane. 
func TestGraphCompareNodes(t *testing.T) { defer leaktest.AfterTest(t)() - t1 := scpb.NewTarget(scpb.Status_PUBLIC, &scpb.Table{TableID: 1}, nil) - t2 := scpb.NewTarget(scpb.Status_ABSENT, &scpb.Table{TableID: 2}, nil) - mkNode := func(t *scpb.Target, s scpb.Status) *scpb.Node { - return &scpb.Node{Target: t, Status: s} + ts := scpb.TargetState{ + Targets: []scpb.Target{ + scpb.MakeTarget(scpb.Status_PUBLIC, &scpb.Table{TableID: 1}, nil), + scpb.MakeTarget(scpb.Status_ABSENT, &scpb.Table{TableID: 2}, nil), + }, + } + t1 := &ts.Targets[0] + t2 := &ts.Targets[1] + mkNode := func(t *scpb.Target, s scpb.Status) *screl.Node { + return &screl.Node{Target: t, CurrentStatus: s} } t1ABSENT := mkNode(t1, scpb.Status_ABSENT) t2PUBLIC := mkNode(t2, scpb.Status_PUBLIC) - g, err := New(scpb.State{ - Nodes: []*scpb.Node{t1ABSENT, t2PUBLIC}, - }) + g, err := New(scpb.CurrentState{TargetState: ts, Current: []scpb.Status{scpb.Status_ABSENT, scpb.Status_PUBLIC}}) targetStr := func(target *scpb.Target) string { switch target { case t1: @@ -151,16 +156,16 @@ func TestGraphCompareNodes(t *testing.T) { panic("unexpected target") } } - nodeStr := func(n *scpb.Node) string { + nodeStr := func(n *screl.Node) string { if n == nil { return "nil" } - return fmt.Sprintf("%s:%s", targetStr(n.Target), n.Status.String()) + return fmt.Sprintf("%s:%s", targetStr(n.Target), n.CurrentStatus.String()) } require.NoError(t, err) for _, tc := range []struct { - a, b *scpb.Node + a, b *screl.Node less, eq bool }{ {a: nil, b: nil, less: false, eq: true}, diff --git a/pkg/sql/schemachanger/scgraph/depedgekind_string.go b/pkg/sql/schemachanger/scplan/internal/scgraph/depedgekind_string.go similarity index 100% rename from pkg/sql/schemachanger/scgraph/depedgekind_string.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/depedgekind_string.go diff --git a/pkg/sql/schemachanger/scgraph/edge.go b/pkg/sql/schemachanger/scplan/internal/scgraph/edge.go similarity index 89% rename from pkg/sql/schemachanger/scgraph/edge.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/edge.go index 4cd635fa3da1..bdc6adeba5af 100644 --- a/pkg/sql/schemachanger/scgraph/edge.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/edge.go @@ -14,7 +14,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" ) @@ -23,13 +22,13 @@ import ( // TODO(ajwerner): Consider hiding Node pointers behind an interface to clarify // mutability. type Edge interface { - From() *scpb.Node - To() *scpb.Node + From() *screl.Node + To() *screl.Node } // OpEdge represents an edge changing the state of a target with an op. type OpEdge struct { - from, to *scpb.Node + from, to *screl.Node op []scop.Op typ scop.Type revertible bool @@ -37,10 +36,10 @@ type OpEdge struct { } // From implements the Edge interface. -func (oe *OpEdge) From() *scpb.Node { return oe.from } +func (oe *OpEdge) From() *screl.Node { return oe.from } // To implements the Edge interface. -func (oe *OpEdge) To() *scpb.Node { return oe.to } +func (oe *OpEdge) To() *screl.Node { return oe.to } // Op returns the scop.Op for execution that is associated with the op edge. 
func (oe *OpEdge) Op() []scop.Op { return oe.op } @@ -65,7 +64,7 @@ func (oe *OpEdge) String() string { if !oe.revertible { nonRevertible = "non-revertible" } - return fmt.Sprintf("%s -op-%s-> %s", from, nonRevertible, oe.to.Status) + return fmt.Sprintf("%s -op-%s-> %s", from, nonRevertible, oe.to.CurrentStatus) } // DepEdgeKind indicates the kind of constraint enforced by the edge. @@ -89,7 +88,7 @@ const ( // implies that the To() node cannot be reached before the From() node. It // can be reached concurrently. type DepEdge struct { - from, to *scpb.Node + from, to *screl.Node kind DepEdgeKind // TODO(ajwerner): Deal with the possibility that multiple rules could @@ -98,10 +97,10 @@ type DepEdge struct { } // From implements the Edge interface. -func (de *DepEdge) From() *scpb.Node { return de.from } +func (de *DepEdge) From() *screl.Node { return de.from } // To implements the Edge interface. -func (de *DepEdge) To() *scpb.Node { return de.to } +func (de *DepEdge) To() *screl.Node { return de.to } // Name returns the name of the rule which generated this edge. func (de *DepEdge) Name() string { return de.rule } diff --git a/pkg/sql/schemachanger/scgraph/graph.go b/pkg/sql/schemachanger/scplan/internal/scgraph/graph.go similarity index 76% rename from pkg/sql/schemachanger/scgraph/graph.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/graph.go index 58b180d0c7d2..0ea83bedf677 100644 --- a/pkg/sql/schemachanger/scgraph/graph.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/graph.go @@ -27,21 +27,15 @@ type Graph struct { // Targets is an interned slice of targets. targets []*scpb.Target - // Statement metadata for targets. - statements []*scpb.Statement - - // Authorization information used by the targets. - authorization scpb.Authorization - // Interns the Node so that pointer equality can be used. - targetNodes []map[scpb.Status]*scpb.Node + targetNodes []map[scpb.Status]*screl.Node // Maps a target to its index in targetNodes. targetIdxMap map[*scpb.Target]int // opEdgesFrom maps a Node to an opEdge that proceeds // from it. A Node may have at most one opEdge from it. - opEdgesFrom map[*scpb.Node]*OpEdge + opEdgesFrom map[*screl.Node]*OpEdge // depEdgesFrom and depEdgesTo map a Node from and to its dependencies. // A Node dependency is another target node which cannot be reached before @@ -68,7 +62,7 @@ func (g *Graph) Database() *rel.Database { // New constructs a new Graph. All initial nodes ought to correspond to distinct // targets. If they do not, an error will be returned. 
-func New(initial scpb.State) (*Graph, error) { +func New(cs scpb.CurrentState) (*Graph, error) { db, err := rel.NewDatabase(screl.Schema, [][]rel.Attr{ {rel.Type, screl.DescID}, {screl.DescID, rel.Type}, @@ -80,26 +74,24 @@ func New(initial scpb.State) (*Graph, error) { return nil, err } g := Graph{ - targetIdxMap: map[*scpb.Target]int{}, - opEdgesFrom: map[*scpb.Node]*OpEdge{}, - noOpOpEdges: map[*OpEdge]bool{}, - opToOpEdge: map[scop.Op]*OpEdge{}, - entities: db, - statements: initial.Statements, - authorization: initial.Authorization, + targetIdxMap: map[*scpb.Target]int{}, + opEdgesFrom: map[*screl.Node]*OpEdge{}, + noOpOpEdges: map[*OpEdge]bool{}, + opToOpEdge: map[scop.Op]*OpEdge{}, + entities: db, } g.depEdgesFrom = newDepEdgeTree(fromTo, g.compareNodes) g.depEdgesTo = newDepEdgeTree(toFrom, g.compareNodes) - for _, n := range initial.Nodes { - if existing, ok := g.targetIdxMap[n.Target]; ok { - return nil, errors.Errorf("invalid initial state contains duplicate target: %v and %v", n, initial.Nodes[existing]) + for i, status := range cs.Current { + t := &cs.Targets[i] + if existing, ok := g.targetIdxMap[t]; ok { + return nil, errors.Errorf("invalid initial state contains duplicate target: %v and %v", *t, cs.Targets[existing]) } idx := len(g.targets) - g.targetIdxMap[n.Target] = idx - g.targets = append(g.targets, n.Target) - g.targetNodes = append(g.targetNodes, map[scpb.Status]*scpb.Node{ - n.Status: n, - }) + g.targetIdxMap[t] = idx + g.targets = append(g.targets, t) + n := &screl.Node{Target: t, CurrentStatus: status} + g.targetNodes = append(g.targetNodes, map[scpb.Status]*screl.Node{status: n}) if err := g.entities.Insert(n); err != nil { return nil, err } @@ -112,18 +104,16 @@ func New(initial scpb.State) (*Graph, error) { func (g *Graph) ShallowClone() *Graph { // Shallow copy the base structure. clone := &Graph{ - targets: g.targets, - statements: g.statements, - authorization: g.authorization, - targetNodes: g.targetNodes, - targetIdxMap: g.targetIdxMap, - opEdgesFrom: g.opEdgesFrom, - depEdgesFrom: g.depEdgesFrom, - depEdgesTo: g.depEdgesTo, - opToOpEdge: g.opToOpEdge, - edges: g.edges, - entities: g.entities, - noOpOpEdges: make(map[*OpEdge]bool), + targets: g.targets, + targetNodes: g.targetNodes, + targetIdxMap: g.targetIdxMap, + opEdgesFrom: g.opEdgesFrom, + depEdgesFrom: g.depEdgesFrom, + depEdgesTo: g.depEdgesTo, + opToOpEdge: g.opToOpEdge, + edges: g.edges, + entities: g.entities, + noOpOpEdges: make(map[*OpEdge]bool), } // Any decorations for mutations will be copied. for edge, noop := range g.noOpOpEdges { @@ -133,7 +123,7 @@ func (g *Graph) ShallowClone() *Graph { } // GetNode returns the cached node for a given target and status. -func (g *Graph) GetNode(t *scpb.Target, s scpb.Status) (*scpb.Node, bool) { +func (g *Graph) GetNode(t *scpb.Target, s scpb.Status) (*screl.Node, bool) { targetStatuses := g.getTargetStatusMap(t) ts, ok := targetStatuses[s] return ts, ok @@ -142,14 +132,14 @@ func (g *Graph) GetNode(t *scpb.Target, s scpb.Status) (*scpb.Node, bool) { // Suppress the linter. 
var _ = (*Graph)(nil).GetNode -func (g *Graph) getOrCreateNode(t *scpb.Target, s scpb.Status) (*scpb.Node, error) { +func (g *Graph) getOrCreateNode(t *scpb.Target, s scpb.Status) (*screl.Node, error) { targetStatuses := g.getTargetStatusMap(t) if ts, ok := targetStatuses[s]; ok { return ts, nil } - ts := &scpb.Node{ - Target: t, - Status: s, + ts := &screl.Node{ + Target: t, + CurrentStatus: s, } targetStatuses[s] = ts if err := g.entities.Insert(ts); err != nil { @@ -158,7 +148,7 @@ func (g *Graph) getOrCreateNode(t *scpb.Target, s scpb.Status) (*scpb.Node, erro return ts, nil } -func (g *Graph) getTargetStatusMap(target *scpb.Target) map[scpb.Status]*scpb.Node { +func (g *Graph) getTargetStatusMap(target *scpb.Target) map[scpb.Status]*screl.Node { idx, ok := g.targetIdxMap[target] if !ok { panic(errors.Errorf("target %v does not exist", target)) @@ -176,7 +166,7 @@ var _ = (*Graph)(nil).containsTarget // GetOpEdgeFrom returns the unique outgoing op edge from the specified node, // if one exists. -func (g *Graph) GetOpEdgeFrom(n *scpb.Node) (*OpEdge, bool) { +func (g *Graph) GetOpEdgeFrom(n *screl.Node) (*OpEdge, bool) { oe, ok := g.opEdgesFrom[n] return oe, ok } @@ -259,20 +249,6 @@ func (g *Graph) IsNoOp(edge *OpEdge) bool { return len(edge.op) == 0 || g.noOpOpEdges[edge] } -// GetMetadataFromTarget returns the metadata for a given target node. -func (g *Graph) GetMetadataFromTarget(target *scpb.Target) scpb.ElementMetadata { - return scpb.ElementMetadata{ - TargetMetadata: scpb.TargetMetadata{ - SourceElementID: target.Metadata.SourceElementID, - SubWorkID: target.Metadata.SubWorkID, - StatementID: target.Metadata.StatementID, - }, - Statement: g.statements[target.Metadata.StatementID].Statement, - Username: g.authorization.Username, - AppName: g.authorization.AppName, - } -} - // Order returns the number of nodes in this graph. func (g *Graph) Order() int { n := 0 @@ -284,9 +260,9 @@ func (g *Graph) Order() int { // Validate returns an error if there's a cycle in the graph. func (g *Graph) Validate() (err error) { - marks := make(map[*scpb.Node]bool, g.Order()) - var visit func(n *scpb.Node) - visit = func(n *scpb.Node) { + marks := make(map[*screl.Node]bool, g.Order()) + var visit func(n *screl.Node) + visit = func(n *screl.Node) { if err != nil { return } @@ -305,7 +281,7 @@ func (g *Graph) Validate() (err error) { }) marks[n] = true } - _ = g.ForEachNode(func(n *scpb.Node) error { + _ = g.ForEachNode(func(n *screl.Node) error { visit(n) return nil }) @@ -313,7 +289,7 @@ func (g *Graph) Validate() (err error) { } // compareNodes compares two nodes in a graph. A nil nodes is the minimum value. 
-func (g *Graph) compareNodes(a, b *scpb.Node) (less, eq bool) { +func (g *Graph) compareNodes(a, b *screl.Node) (less, eq bool) { switch { case a == b: return false, true @@ -322,7 +298,7 @@ func (g *Graph) compareNodes(a, b *scpb.Node) (less, eq bool) { case b == nil: return false, false case a.Target == b.Target: - return a.Status < b.Status, a.Status == b.Status + return a.CurrentStatus < b.CurrentStatus, a.CurrentStatus == b.CurrentStatus default: aIdx, bIdx := g.targetIdxMap[a.Target], g.targetIdxMap[b.Target] return aIdx < bIdx, aIdx == bIdx diff --git a/pkg/sql/schemachanger/scgraph/graph_test.go b/pkg/sql/schemachanger/scplan/internal/scgraph/graph_test.go similarity index 75% rename from pkg/sql/schemachanger/scgraph/graph_test.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/graph_test.go index 66c1b83da6d5..3708af56839c 100644 --- a/pkg/sql/schemachanger/scgraph/graph_test.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/graph_test.go @@ -15,9 +15,9 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" "github.com/stretchr/testify/require" ) @@ -80,51 +80,45 @@ func TestGraphRanks(t *testing.T) { t *testing.T, tc testCase, ) { // Setup a state based on if it is a add or drop. - state := scpb.State{ - Nodes: make([]*scpb.Node, 0, len(tc.addNode)), - } + ts := scpb.TargetState{Targets: make([]scpb.Target, len(tc.addNode))} + status := make([]scpb.Status, len(tc.addNode)) for idx := range tc.addNode { + ts.Targets[idx] = scpb.MakeTarget( + scpb.Status_PUBLIC, + &scpb.Table{ + TableID: descpb.ID(idx), + }, + nil, /* metadata */ + ) if tc.addNode[idx] { - state.Nodes = append(state.Nodes, &scpb.Node{ - Target: scpb.NewTarget( - scpb.Status_PUBLIC, - &scpb.Table{ - TableID: descpb.ID(idx), - }, - nil /* metadata */), - Status: scpb.Status_ABSENT, - }) + status[idx] = scpb.Status_ABSENT } else { - state.Nodes = append(state.Nodes, &scpb.Node{ - Target: scpb.NewTarget( - scpb.Status_PUBLIC, - &scpb.Table{ - TableID: descpb.ID(idx), - }, - nil /* metadata */), - Status: scpb.Status_PUBLIC, - }) + status[idx] = scpb.Status_PUBLIC } } // Setup the nodes first. - graph, err := scgraph.New(state) + graph, err := scgraph.New(scpb.CurrentState{TargetState: ts, Current: status}) require.NoError(t, err) // Setup op edges for all the nodes. for idx := range tc.addNode { if tc.addNode[idx] { - require.NoError(t, graph.AddOpEdges(state.Nodes[idx].Target, + require.NoError(t, graph.AddOpEdges( + &ts.Targets[idx], scpb.Status_ABSENT, scpb.Status_PUBLIC, true, scop.StatementPhase, - &scop.MakeColumnAbsent{})) + &scop.MakeColumnAbsent{}, + )) } else { - require.NoError(t, graph.AddOpEdges(state.Nodes[idx].Target, + require.NoError(t, graph.AddOpEdges( + &ts.Targets[idx], scpb.Status_PUBLIC, scpb.Status_ABSENT, true, scop.StatementPhase, - &scop.MakeColumnAbsent{})) + &scop.MakeColumnAbsent{}, + )) } } // Add the dep edges next. 
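
For reference, a minimal sketch of the construction pattern the test above exercises, written as a standalone test; the table element, ID, and statuses are illustrative:

```
package scgraph_test

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
	"github.com/stretchr/testify/require"
)

func TestNewGraphFromCurrentState(t *testing.T) {
	// Targets and their current statuses are parallel slices.
	ts := scpb.TargetState{
		Targets: []scpb.Target{
			scpb.MakeTarget(scpb.Status_PUBLIC, &scpb.Table{TableID: 1}, nil /* metadata */),
		},
	}
	current := []scpb.Status{scpb.Status_ABSENT}
	// The graph is now seeded from a scpb.CurrentState rather than a []*scpb.Node.
	g, err := scgraph.New(scpb.CurrentState{TargetState: ts, Current: current})
	require.NoError(t, err)
	// One interned node per target.
	require.Equal(t, 1, g.Order())
}
```
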
@@ -132,9 +126,9 @@ func TestGraphRanks(t *testing.T) { require.NoError(t, graph.AddDepEdge( fmt.Sprintf("%d to %d", edge.from, edge.to), scgraph.Precedence, - state.Nodes[edge.from].Target, + &ts.Targets[edge.from], scpb.Status_PUBLIC, - state.Nodes[edge.to].Target, + &ts.Targets[edge.to], scpb.Status_PUBLIC, )) } diff --git a/pkg/sql/schemachanger/scgraph/iteration.go b/pkg/sql/schemachanger/scplan/internal/scgraph/iteration.go similarity index 86% rename from pkg/sql/schemachanger/scgraph/iteration.go rename to pkg/sql/schemachanger/scplan/internal/scgraph/iteration.go index a1dcc08ab3c5..a68b975ef715 100644 --- a/pkg/sql/schemachanger/scgraph/iteration.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraph/iteration.go @@ -12,12 +12,13 @@ package scgraph import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/util/iterutil" ) // NodeIterator is used to iterate nodes. Return iterutil.StopIteration to // return early with no error. -type NodeIterator func(n *scpb.Node) error +type NodeIterator func(n *screl.Node) error // ForEachNode iterates the nodes in the graph. func (g *Graph) ForEachNode(it NodeIterator) error { @@ -59,12 +60,12 @@ type DepEdgeIterator func(de *DepEdge) error // ForEachDepEdgeFrom iterates the dep edges in the graph with the selected // source. -func (g *Graph) ForEachDepEdgeFrom(n *scpb.Node, it DepEdgeIterator) (err error) { +func (g *Graph) ForEachDepEdgeFrom(n *screl.Node, it DepEdgeIterator) (err error) { return g.depEdgesFrom.iterateSourceNode(n, it) } // ForEachDepEdgeTo iterates the dep edges in the graph with the selected // destination. -func (g *Graph) ForEachDepEdgeTo(n *scpb.Node, it DepEdgeIterator) (err error) { +func (g *Graph) ForEachDepEdgeTo(n *screl.Node, it DepEdgeIterator) (err error) { return g.depEdgesTo.iterateSourceNode(n, it) } diff --git a/pkg/sql/schemachanger/scgraphviz/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/scgraphviz/BUILD.bazel similarity index 71% rename from pkg/sql/schemachanger/scgraphviz/BUILD.bazel rename to pkg/sql/schemachanger/scplan/internal/scgraphviz/BUILD.bazel index fe77a5401072..2a9722e07d16 100644 --- a/pkg/sql/schemachanger/scgraphviz/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/scgraphviz/BUILD.bazel @@ -3,13 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "scgraphviz", srcs = ["graphviz.go"], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraphviz", visibility = ["//visibility:public"], deps = [ - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", - "//pkg/sql/schemachanger/scplan", + "//pkg/sql/schemachanger/scplan/internal/scgraph", + "//pkg/sql/schemachanger/scplan/internal/scstage", + "//pkg/sql/schemachanger/screl", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", "@com_github_emicklei_dot//:dot", diff --git a/pkg/sql/schemachanger/scgraphviz/graphviz.go b/pkg/sql/schemachanger/scplan/internal/scgraphviz/graphviz.go similarity index 73% rename from pkg/sql/schemachanger/scgraphviz/graphviz.go rename to pkg/sql/schemachanger/scplan/internal/scgraphviz/graphviz.go index bb0fa0807595..486d80e70378 100644 --- a/pkg/sql/schemachanger/scgraphviz/graphviz.go +++ b/pkg/sql/schemachanger/scplan/internal/scgraphviz/graphviz.go @@ -23,10 +23,11 @@ import ( 
"strconv" "strings" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scstage" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/emicklei/dot" @@ -34,8 +35,8 @@ import ( ) // StagesURL returns a URL to a rendering of the stages of the Plan. -func StagesURL(p scplan.Plan) (string, error) { - gv, err := DrawStages(p) +func StagesURL(cs scpb.CurrentState, g *scgraph.Graph, stages []scstage.Stage) (string, error) { + gv, err := DrawStages(cs, g, stages) if err != nil { return "", err } @@ -44,8 +45,8 @@ func StagesURL(p scplan.Plan) (string, error) { // DependenciesURL returns a URL to a rendering of the graph used to build // the Plan. -func DependenciesURL(p scplan.Plan) (string, error) { - gv, err := DrawDependencies(p) +func DependenciesURL(cs scpb.CurrentState, g *scgraph.Graph) (string, error) { + gv, err := DrawDependencies(cs, g) if err != nil { return "", err } @@ -70,21 +71,23 @@ func buildURL(gv string) (string, error) { } // DecorateErrorWithPlanDetails adds plan graphviz URLs as error details. -func DecorateErrorWithPlanDetails(err error, p scplan.Plan) error { +func DecorateErrorWithPlanDetails( + err error, cs scpb.CurrentState, g *scgraph.Graph, stages []scstage.Stage, +) error { if err == nil { return nil } - if p.Stages != nil { - stagesURL, stagesErr := StagesURL(p) + if len(stages) > 0 { + stagesURL, stagesErr := StagesURL(cs, g, stages) if stagesErr != nil { return errors.CombineErrors(err, stagesErr) } err = errors.WithDetailf(err, "stages: %s", stagesURL) } - if p.Graph != nil { - dependenciesURL, dependenciesErr := DependenciesURL(p) + if g != nil { + dependenciesURL, dependenciesErr := DependenciesURL(cs, g) if dependenciesErr != nil { return errors.CombineErrors(err, dependenciesErr) } @@ -95,11 +98,11 @@ func DecorateErrorWithPlanDetails(err error, p scplan.Plan) error { } // DrawStages returns a graphviz string of the stages of the Plan. -func DrawStages(p scplan.Plan) (string, error) { - if p.Stages == nil { +func DrawStages(cs scpb.CurrentState, g *scgraph.Graph, stages []scstage.Stage) (string, error) { + if len(stages) == 0 { return "", errors.Errorf("missing stages in plan") } - gv, err := drawStages(p) + gv, err := drawStages(cs, g, stages) if err != nil { return "", err } @@ -107,78 +110,78 @@ func DrawStages(p scplan.Plan) (string, error) { } // DrawDependencies returns a graphviz string of graph used to build the Plan. 
-func DrawDependencies(p scplan.Plan) (string, error) { - if p.Graph == nil { +func DrawDependencies(cs scpb.CurrentState, g *scgraph.Graph) (string, error) { + if g == nil { return "", errors.Errorf("missing graph in plan") } - gv, err := drawDeps(p) + gv, err := drawDeps(cs, g) if err != nil { return "", err } return gv.String(), nil } -func drawStages(p scplan.Plan) (*dot.Graph, error) { - +func drawStages( + cs scpb.CurrentState, g *scgraph.Graph, stages []scstage.Stage, +) (*dot.Graph, error) { dg := dot.NewGraph() stagesSubgraph := dg.Subgraph("stages", dot.ClusterOption{}) targetsSubgraph := stagesSubgraph.Subgraph("targets", dot.ClusterOption{}) statementsSubgraph := stagesSubgraph.Subgraph("statements", dot.ClusterOption{}) - targetNodes := make(map[*scpb.Target]dot.Node, len(p.Initial.Nodes)) + targetNodes := make([]dot.Node, len(cs.TargetState.Targets)) // Add all the statements in their own section. // Note: Explains can only have one statement, so we aren't // going to bother adding arrows to them. - for idx, stmt := range p.Initial.Statements { - stmtNode := statementsSubgraph.Node(itoa(idx, len(p.Initial.Statements))) + for idx, stmt := range cs.TargetState.Statements { + stmtNode := statementsSubgraph.Node(itoa(idx, len(cs.TargetState.Statements))) stmtNode.Attr("label", htmlLabel(stmt)) stmtNode.Attr("fontsize", "9") stmtNode.Attr("shape", "none") } - for idx, n := range p.Initial.Nodes { - t := n.Target - tn := targetsSubgraph.Node(itoa(idx, len(p.Initial.Nodes))) + for idx, t := range cs.TargetState.Targets { + tn := targetsSubgraph.Node(itoa(idx, len(cs.TargetState.Targets))) tn.Attr("label", htmlLabel(t.Element())) tn.Attr("fontsize", "9") tn.Attr("shape", "none") - targetNodes[t] = tn + targetNodes[idx] = tn } // Want to draw an edge to the initial target statuses with some dots // or something. 
- curNodes := make([]dot.Node, len(p.Initial.Nodes)) - cur := p.Initial + curNodes := make([]dot.Node, len(cs.Current)) + cur := cs.Current curDummy := targetsSubgraph.Node("dummy") curDummy.Attr("shape", "point") curDummy.Attr("style", "invis") - for i, n := range p.Initial.Nodes { - label := targetStatusID(i, n.Status) + for i, status := range cs.Current { + label := targetStatusID(i, status) tsn := stagesSubgraph.Node(fmt.Sprintf("initial %d", i)) tsn.Attr("label", label) - tn := targetNodes[n.Target] + tn := targetNodes[i] e := tn.Edge(tsn) e.Dashed() - e.Label(fmt.Sprintf("to %s", n.Target.TargetStatus.String())) + e.Label(fmt.Sprintf("to %s", cs.TargetState.Targets[i].TargetStatus.String())) curNodes[i] = tsn } - for _, st := range p.Stages { + for _, st := range stages { stage := st.String() sg := stagesSubgraph.Subgraph(stage, dot.ClusterOption{}) next := st.After nextNodes := make([]dot.Node, len(curNodes)) m := make(map[scpb.Element][]scop.Op, len(curNodes)) for _, op := range st.EdgeOps { - if oe := p.Graph.GetOpEdgeFromOp(op); oe != nil { + if oe := g.GetOpEdgeFromOp(op); oe != nil { e := oe.To().Element() m[e] = append(m[e], op) } } - for i, n := range next.Nodes { + for i, status := range next { cst := sg.Node(fmt.Sprintf("%s: %d", stage, i)) - cst.Attr("label", targetStatusID(i, n.Status)) + cst.Attr("label", targetStatusID(i, status)) ge := curNodes[i].Edge(cst) - if n != cur.Nodes[i] { - if ops := m[n.Element()]; len(ops) > 0 { + if status != cur[i] { + if ops := m[cs.TargetState.Targets[i].Element()]; len(ops) > 0 { ge.Attr("label", htmlLabel(ops)) ge.Attr("fontsize", "9") } @@ -201,49 +204,50 @@ func drawStages(p scplan.Plan) (*dot.Graph, error) { return dg, nil } -func drawDeps(p scplan.Plan) (*dot.Graph, error) { +func drawDeps(cs scpb.CurrentState, g *scgraph.Graph) (*dot.Graph, error) { dg := dot.NewGraph() depsSubgraph := dg.Subgraph("deps", dot.ClusterOption{}) targetsSubgraph := depsSubgraph.Subgraph("targets", dot.ClusterOption{}) statementsSubgraph := depsSubgraph.Subgraph("statements", dot.ClusterOption{}) - targetNodes := make(map[*scpb.Target]dot.Node, len(p.Initial.Nodes)) + targetNodes := make([]dot.Node, len(cs.Current)) targetIdxMap := make(map[*scpb.Target]int) // Add all the statements in their own section. // Note: Explains can only have one statement, so we aren't // going to bother adding arrows to them. 
- for idx, stmt := range p.Initial.Statements { - stmtNode := statementsSubgraph.Node(itoa(idx, len(p.Initial.Statements))) + for idx, stmt := range cs.TargetState.Statements { + stmtNode := statementsSubgraph.Node(itoa(idx, len(cs.TargetState.Statements))) stmtNode.Attr("label", htmlLabel(stmt)) stmtNode.Attr("fontsize", "9") stmtNode.Attr("shape", "none") } - for idx, n := range p.Initial.Nodes { - t := n.Target - tn := targetsSubgraph.Node(itoa(idx, len(p.Initial.Nodes))) + targetStatusNodes := make([]map[scpb.Status]dot.Node, len(cs.Current)) + for idx, status := range cs.Current { + t := &cs.TargetState.Targets[idx] + tn := targetsSubgraph.Node(itoa(idx, len(cs.Current))) tn.Attr("label", htmlLabel(t.Element())) tn.Attr("fontsize", "9") tn.Attr("shape", "none") - targetNodes[t] = tn + targetNodes[idx] = tn targetIdxMap[t] = idx + targetStatusNodes[idx] = map[scpb.Status]dot.Node{status: tn} } - nodeNodes := make(map[*scpb.Node]dot.Node) - _ = p.Graph.ForEachNode(func(n *scpb.Node) error { - nodeNodes[n] = depsSubgraph.Node(targetStatusID(targetIdxMap[n.Target], n.Status)) + _ = g.ForEachNode(func(n *screl.Node) error { + tn := depsSubgraph.Node(targetStatusID(targetIdxMap[n.Target], n.CurrentStatus)) + targetStatusNodes[targetIdxMap[n.Target]][n.CurrentStatus] = tn return nil }) - - for _, n := range p.Initial.Nodes { - nn := nodeNodes[n] - tn := targetNodes[n.Target] + for idx, status := range cs.Current { + nn := targetStatusNodes[idx][status] + tn := targetNodes[idx] e := tn.Edge(nn) - e.Label(fmt.Sprintf("to %s", n.Target.TargetStatus.String())) + e.Label(fmt.Sprintf("to %s", cs.TargetState.Targets[idx].TargetStatus.String())) e.Dashed() } - _ = p.Graph.ForEachEdge(func(e scgraph.Edge) error { - from := nodeNodes[e.From()] - to := nodeNodes[e.To()] + _ = g.ForEachEdge(func(e scgraph.Edge) error { + from := targetStatusNodes[targetIdxMap[e.From().Target]][e.From().CurrentStatus] + to := targetStatusNodes[targetIdxMap[e.To().Target]][e.To().CurrentStatus] ge := from.Edge(to) switch e := e.(type) { case *scgraph.OpEdge: diff --git a/pkg/sql/schemachanger/scplan/scopt/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/scopt/BUILD.bazel similarity index 81% rename from pkg/sql/schemachanger/scplan/scopt/BUILD.bazel rename to pkg/sql/schemachanger/scplan/internal/scopt/BUILD.bazel index 77d07b482790..f1d280db9afa 100644 --- a/pkg/sql/schemachanger/scplan/scopt/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/scopt/BUILD.bazel @@ -6,12 +6,12 @@ go_library( "registry.go", "rules.go", ], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scopt", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scopt", visibility = ["//visibility:public"], deps = [ "//pkg/sql/schemachanger/rel", - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/schemachanger/scplan/internal/scgraph", "//pkg/sql/schemachanger/screl", ], ) diff --git a/pkg/sql/schemachanger/scplan/scopt/registry.go b/pkg/sql/schemachanger/scplan/internal/scopt/registry.go similarity index 87% rename from pkg/sql/schemachanger/scplan/scopt/registry.go rename to pkg/sql/schemachanger/scplan/internal/scopt/registry.go index 4078fab36def..b631baa69b52 100644 --- a/pkg/sql/schemachanger/scplan/scopt/registry.go +++ b/pkg/sql/schemachanger/scplan/internal/scopt/registry.go @@ -12,8 +12,8 @@ package scopt import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" - 
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" ) type deleteNodeOpt struct { @@ -46,10 +46,10 @@ func targetNodeVars(el rel.Var) (element, target, node rel.Var) { // original with these any transformations / optimizations applied. func OptimizeGraph(graph *scgraph.Graph) (*scgraph.Graph, error) { db := graph.Database() - nodesToMark := make(map[*scpb.Node]struct{}) + nodesToMark := make(map[*screl.Node]struct{}) for _, delete := range optRegistry.deleteQueries { err := delete.query.Iterate(db, func(r rel.Result) error { - nodesToMark[r.Var(delete.edgeFromVar).(*scpb.Node)] = struct{}{} + nodesToMark[r.Var(delete.edgeFromVar).(*screl.Node)] = struct{}{} return nil }) if err != nil { diff --git a/pkg/sql/schemachanger/scplan/scopt/rules.go b/pkg/sql/schemachanger/scplan/internal/scopt/rules.go similarity index 100% rename from pkg/sql/schemachanger/scplan/scopt/rules.go rename to pkg/sql/schemachanger/scplan/internal/scopt/rules.go diff --git a/pkg/sql/schemachanger/scplan/scstage/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/scstage/BUILD.bazel similarity index 85% rename from pkg/sql/schemachanger/scplan/scstage/BUILD.bazel rename to pkg/sql/schemachanger/scplan/internal/scstage/BUILD.bazel index 51a8e991396f..ff35f6be451b 100644 --- a/pkg/sql/schemachanger/scplan/scstage/BUILD.bazel +++ b/pkg/sql/schemachanger/scplan/internal/scstage/BUILD.bazel @@ -6,14 +6,14 @@ go_library( "build.go", "stage.go", ], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scstage", + importpath = "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scstage", visibility = ["//visibility:public"], deps = [ "//pkg/jobs/jobspb", "//pkg/sql/catalog/descpb", - "//pkg/sql/schemachanger/scgraph", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", + "//pkg/sql/schemachanger/scplan/internal/scgraph", "//pkg/sql/schemachanger/screl", "//pkg/util/iterutil", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/schemachanger/scplan/scstage/build.go b/pkg/sql/schemachanger/scplan/internal/scstage/build.go similarity index 69% rename from pkg/sql/schemachanger/scplan/scstage/build.go rename to pkg/sql/schemachanger/scplan/internal/scstage/build.go index 1c854b7cba1d..8df3129e3405 100644 --- a/pkg/sql/schemachanger/scplan/scstage/build.go +++ b/pkg/sql/schemachanger/scplan/internal/scstage/build.go @@ -13,9 +13,9 @@ package scstage import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/errors" @@ -23,67 +23,84 @@ import ( // BuildStages builds the plan's stages for this and all subsequent phases. 
func BuildStages( - init scpb.State, phase scop.Phase, g *scgraph.Graph, scJobIDSupplier func() jobspb.JobID, + init scpb.CurrentState, phase scop.Phase, g *scgraph.Graph, scJobIDSupplier func() jobspb.JobID, ) []Stage { - newBuildState := func(isRevertibilityIgnored bool) *buildState { - b := buildState{ - g: g, - phase: phase, - state: shallowCopy(init), - fulfilled: make(map[*scpb.Node]struct{}, g.Order()), - scJobIDSupplier: scJobIDSupplier, - isRevertibilityIgnored: isRevertibilityIgnored, - } - for _, n := range init.Nodes { - b.fulfilled[n] = struct{}{} - } - return &b + c := buildContext{ + g: g, + scJobIDSupplier: scJobIDSupplier, + isRevertibilityIgnored: true, + targetState: init.TargetState, + startingStatuses: init.Current, + startingPhase: phase, } // Try building stages while ignoring revertibility constraints. // This is fine as long as there are no post-commit stages. - stages := buildStages(newBuildState(true /* isRevertibilityIgnored */)) + stages := buildStages(c) if n := len(stages); n > 0 && stages[n-1].Phase > scop.PreCommitPhase { - stages = buildStages(newBuildState(false /* isRevertibilityIgnored */)) + c.isRevertibilityIgnored = false + stages = buildStages(c) } return decorateStages(stages) } -func buildStages(b *buildState) (stages []Stage) { +// buildContext contains the global constants for building the stages. +// Only the BuildStages function mutates it, it's read-only everywhere else. +type buildContext struct { + g *scgraph.Graph + scJobIDSupplier func() jobspb.JobID + isRevertibilityIgnored bool + targetState scpb.TargetState + startingStatuses []scpb.Status + startingPhase scop.Phase +} + +func buildStages(bc buildContext) (stages []Stage) { + // Initialize the build state for this buildContext. + bs := buildState{ + incumbent: make([]scpb.Status, len(bc.startingStatuses)), + phase: bc.startingPhase, + fulfilled: make(map[*screl.Node]struct{}, bc.g.Order()), + } + for i, n := range bc.nodes(bc.startingStatuses) { + bs.incumbent[i] = n.CurrentStatus + bs.fulfilled[n] = struct{}{} + } // Build stages until reaching the terminal state. - for !b.isStateTerminal(b.state) { + for !bc.isStateTerminal(bs.incumbent) { // Generate a stage builder which can make progress. - sb := b.makeStageBuilder() + sb := bc.makeStageBuilder(bs) for !sb.canMakeProgress() { // When no further progress is possible, move to the next phase and try // again, until progress is possible. We haven't reached the terminal // state yet, so this is guaranteed (barring any horrible bugs). - if b.phase == scop.PreCommitPhase { + if bs.phase == scop.PreCommitPhase { // This is a special case. // We need to move to the post-commit phase, but this will require // creating a schema changer job, which in turn will require this // otherwise-empty pre-commit stage. break } - if b.phase == scop.LatestPhase { + if bs.phase == scop.LatestPhase { // This should never happen, we should always be able to make forward // progress because we haven't reached the terminal state yet. panic(errors.AssertionFailedf("unable to make progress")) } - b.phase++ - sb = b.makeStageBuilder() + bs.phase++ + sb = bc.makeStageBuilder(bs) } // Build the stage. - stages = append(stages, sb.build()) + stage := sb.build() + stages = append(stages, stage) // Update the build state with this stage's progress. 
for n := range sb.fulfilling { - b.fulfilled[n] = struct{}{} + bs.fulfilled[n] = struct{}{} } - b.state = sb.after() - switch b.phase { + bs.incumbent = stage.After + switch bs.phase { case scop.StatementPhase, scop.PreCommitPhase: // These phases can only have at most one stage each. - b.phase++ + bs.phase++ } } return stages @@ -92,20 +109,16 @@ func buildStages(b *buildState) (stages []Stage) { // buildState contains the global build state for building the stages. // Only the buildStages function mutates it, it's read-only everywhere else. type buildState struct { - g *scgraph.Graph - scJobIDSupplier func() jobspb.JobID - isRevertibilityIgnored bool - - state scpb.State + incumbent []scpb.Status phase scop.Phase - fulfilled map[*scpb.Node]struct{} + fulfilled map[*screl.Node]struct{} } // isStateTerminal returns true iff the state is terminal, according to the // graph. -func (b buildState) isStateTerminal(state scpb.State) bool { - for _, n := range state.Nodes { - if _, found := b.g.GetOpEdgeFrom(n); found { +func (bc buildContext) isStateTerminal(current []scpb.Status) bool { + for _, n := range bc.nodes(current) { + if _, found := bc.g.GetOpEdgeFrom(n); found { return false } } @@ -114,15 +127,15 @@ func (b buildState) isStateTerminal(state scpb.State) bool { // makeStageBuilder returns a stage builder with an operation type for which // progress can be made. Defaults to the mutation type if none make progress. -func (b buildState) makeStageBuilder() (sb stageBuilder) { +func (bc buildContext) makeStageBuilder(bs buildState) (sb stageBuilder) { opTypes := []scop.Type{scop.BackfillType, scop.ValidationType, scop.MutationType} - switch b.phase { + switch bs.phase { case scop.StatementPhase, scop.PreCommitPhase: // We don't allow expensive operations pre-commit. opTypes = []scop.Type{scop.MutationType} } for _, opType := range opTypes { - sb = b.makeStageBuilderForType(opType) + sb = bc.makeStageBuilderForType(bs, opType) if sb.canMakeProgress() { break } @@ -132,14 +145,15 @@ func (b buildState) makeStageBuilder() (sb stageBuilder) { // makeStageBuilderForType creates and populates a stage builder for the given // op type. -func (b buildState) makeStageBuilderForType(opType scop.Type) stageBuilder { +func (bc buildContext) makeStageBuilderForType(bs buildState, opType scop.Type) stageBuilder { sb := stageBuilder{ - bs: b, + bc: bc, + bs: bs, opType: opType, - current: make([]currentTargetState, len(b.state.Nodes)), - fulfilling: map[*scpb.Node]struct{}{}, + current: make([]currentTargetState, len(bc.targetState.Targets)), + fulfilling: map[*screl.Node]struct{}{}, } - for i, n := range b.state.Nodes { + for i, n := range bc.nodes(bs.incumbent) { t := sb.makeCurrentTargetState(n) sb.current[i] = t } @@ -167,15 +181,16 @@ func (b buildState) makeStageBuilderForType(opType scop.Type) stageBuilder { // stageBuilder contains the state for building one stage. 
type stageBuilder struct { + bc buildContext bs buildState opType scop.Type current []currentTargetState - fulfilling map[*scpb.Node]struct{} + fulfilling map[*screl.Node]struct{} opEdges []*scgraph.OpEdge } type currentTargetState struct { - n *scpb.Node + n *screl.Node e *scgraph.OpEdge // hasOpEdgeWithOps is true iff this stage already includes an op edge with @@ -183,15 +198,15 @@ type currentTargetState struct { hasOpEdgeWithOps bool } -func (sb stageBuilder) makeCurrentTargetState(n *scpb.Node) currentTargetState { - e, found := sb.bs.g.GetOpEdgeFrom(n) +func (sb stageBuilder) makeCurrentTargetState(n *screl.Node) currentTargetState { + e, found := sb.bc.g.GetOpEdgeFrom(n) if !found || !sb.isOutgoingOpEdgeAllowed(e) { return currentTargetState{n: n} } return currentTargetState{ n: n, e: e, - hasOpEdgeWithOps: !sb.bs.g.IsNoOp(e), + hasOpEdgeWithOps: !sb.bc.g.IsNoOp(e), } } @@ -214,7 +229,7 @@ func (sb stageBuilder) isOutgoingOpEdgeAllowed(e *scgraph.OpEdge) bool { if !e.IsPhaseSatisfied(sb.bs.phase) { return false } - if !sb.bs.isRevertibilityIgnored && sb.bs.phase == scop.PostCommitPhase && !e.Revertible() { + if !sb.bc.isRevertibilityIgnored && sb.bs.phase == scop.PostCommitPhase && !e.Revertible() { return false } return true @@ -241,8 +256,8 @@ func (sb stageBuilder) nextTargetState(t currentTargetState) currentTargetState return next } -func (sb stageBuilder) hasUnmetInboundDeps(n *scpb.Node) (ret bool) { - _ = sb.bs.g.ForEachDepEdgeTo(n, func(de *scgraph.DepEdge) error { +func (sb stageBuilder) hasUnmetInboundDeps(n *screl.Node) (ret bool) { + _ = sb.bc.g.ForEachDepEdgeTo(n, func(de *scgraph.DepEdge) error { if sb.isUnmetInboundDep(de) { ret = true return iterutil.StopIteration() @@ -281,16 +296,16 @@ func (sb *stageBuilder) isUnmetInboundDep(de *scgraph.DepEdge) bool { de.String(), de.Name())) } -func (sb stageBuilder) hasUnmeetableOutboundDeps(n *scpb.Node) (ret bool) { - candidates := make(map[*scpb.Node]int, len(sb.current)) +func (sb stageBuilder) hasUnmeetableOutboundDeps(n *screl.Node) (ret bool) { + candidates := make(map[*screl.Node]int, len(sb.current)) for i, t := range sb.current { if t.e != nil { candidates[t.e.To()] = i } } - visited := make(map[*scpb.Node]bool) - var visit func(n *scpb.Node) - visit = func(n *scpb.Node) { + visited := make(map[*screl.Node]bool) + var visit func(n *screl.Node) + visit = func(n *screl.Node) { if ret || visited[n] { return } @@ -309,7 +324,7 @@ func (sb stageBuilder) hasUnmeetableOutboundDeps(n *scpb.Node) (ret bool) { ret = true return } - _ = sb.bs.g.ForEachDepEdgeTo(n, func(de *scgraph.DepEdge) error { + _ = sb.bc.g.ForEachDepEdgeTo(n, func(de *scgraph.DepEdge) error { if ret { return iterutil.StopIteration() } @@ -326,7 +341,7 @@ func (sb stageBuilder) hasUnmeetableOutboundDeps(n *scpb.Node) (ret bool) { } return nil }) - _ = sb.bs.g.ForEachDepEdgeFrom(n, func(de *scgraph.DepEdge) error { + _ = sb.bc.g.ForEachDepEdgeFrom(n, func(de *scgraph.DepEdge) error { if ret { return iterutil.StopIteration() } @@ -344,13 +359,17 @@ func (sb stageBuilder) hasUnmeetableOutboundDeps(n *scpb.Node) (ret bool) { } func (sb stageBuilder) build() Stage { + after := make([]scpb.Status, len(sb.current)) + for i, t := range sb.current { + after[i] = t.n.CurrentStatus + } s := Stage{ - Before: sb.bs.state, - After: sb.after(), + Before: sb.bs.incumbent, + After: after, Phase: sb.bs.phase, } for _, e := range sb.opEdges { - if sb.bs.g.IsNoOp(e) { + if sb.bc.g.IsNoOp(e) { continue } s.EdgeOps = append(s.EdgeOps, e.Op()...) 
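
For illustration, a small sketch of how the parallel status slice pairs back up with the interned graph nodes; the helper name and its placement in scstage are hypothetical, and it assumes the graph was built from this same CurrentState, since targets are interned by pointer:

```
package scstage // hypothetical placement, for illustration only

import (
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
	"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl"
	"github.com/cockroachdb/errors"
)

// nodesForState pairs each target with its current status and looks up the
// interned *screl.Node for that pair in the graph.
func nodesForState(g *scgraph.Graph, cs scpb.CurrentState) ([]*screl.Node, error) {
	nodes := make([]*screl.Node, len(cs.Current))
	for i, status := range cs.Current {
		t := &cs.Targets[i]
		n, ok := g.GetNode(t, status)
		if !ok {
			return nil, errors.Errorf("no node for element %s at status %s",
				screl.ElementString(t.Element()), status)
		}
		nodes[i] = n
	}
	return nodes, nil
}
```
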
@@ -368,43 +387,36 @@ func (sb stageBuilder) build() Stage { // If this pre-commit stage is non-terminal, this means there will be at // least one post-commit stage, so we need to create a schema changer job // and update references for the affected descriptors. - if !sb.bs.isStateTerminal(s.After) { - s.ExtraOps = append(sb.addJobReferenceOps(s.After), sb.createSchemaChangeJobOp(s.After)) + if !sb.bc.isStateTerminal(after) { + s.ExtraOps = append(sb.bc.addJobReferenceOps(), sb.bc.createSchemaChangeJobOp(after)) } case scop.PostCommitPhase, scop.PostCommitNonRevertiblePhase: if sb.opType == scop.MutationType { - if sb.bs.isStateTerminal(s.After) { + if sb.bc.isStateTerminal(after) { // The terminal mutation stage needs to remove references to the schema // changer job in the affected descriptors. - s.ExtraOps = sb.removeJobReferenceOps(s.After) + s.ExtraOps = sb.bc.removeJobReferenceOps() } // Post-commit mutation stages all update the progress of the schema // changer job. - s.ExtraOps = append(s.ExtraOps, sb.updateJobProgressOp(s.After)) + s.ExtraOps = append(s.ExtraOps, sb.bc.updateJobProgressOp(after, s.Phase > scop.PostCommitPhase)) } } return s } -func (sb stageBuilder) after() scpb.State { - state := shallowCopy(sb.bs.state) - for i, t := range sb.current { - state.Nodes[i] = t.n - } - return state -} - -func (sb stageBuilder) createSchemaChangeJobOp(state scpb.State) scop.Op { +func (bc buildContext) createSchemaChangeJobOp(current []scpb.Status) scop.Op { return &scop.CreateDeclarativeSchemaChangerJob{ - JobID: sb.bs.scJobIDSupplier(), - State: shallowCopy(state), + JobID: bc.scJobIDSupplier(), + TargetState: bc.targetState, + Current: current, } } -func (sb stageBuilder) addJobReferenceOps(state scpb.State) []scop.Op { - jobID := sb.bs.scJobIDSupplier() +func (bc buildContext) addJobReferenceOps() []scop.Op { + jobID := bc.scJobIDSupplier() return generateOpsForJobIDs( - screl.GetDescIDs(state), + screl.GetDescIDs(bc.targetState), jobID, func(descID descpb.ID, id jobspb.JobID) scop.Op { return &scop.AddJobReference{DescriptorID: descID, JobID: jobID} @@ -412,18 +424,18 @@ func (sb stageBuilder) addJobReferenceOps(state scpb.State) []scop.Op { ) } -func (sb stageBuilder) updateJobProgressOp(state scpb.State) scop.Op { +func (bc buildContext) updateJobProgressOp(current []scpb.Status, isNonCancellable bool) scop.Op { return &scop.UpdateSchemaChangerJob{ - JobID: sb.bs.scJobIDSupplier(), - Statuses: state.Statuses(), - IsNonCancelable: sb.bs.phase >= scop.PostCommitNonRevertiblePhase, + JobID: bc.scJobIDSupplier(), + Current: current, + IsNonCancelable: isNonCancellable, } } -func (sb stageBuilder) removeJobReferenceOps(state scpb.State) []scop.Op { - jobID := sb.bs.scJobIDSupplier() +func (bc buildContext) removeJobReferenceOps() []scop.Op { + jobID := bc.scJobIDSupplier() return generateOpsForJobIDs( - screl.GetDescIDs(state), + screl.GetDescIDs(bc.targetState), jobID, func(descID descpb.ID, id jobspb.JobID) scop.Op { return &scop.RemoveJobReference{DescriptorID: descID, JobID: jobID} @@ -441,21 +453,18 @@ func generateOpsForJobIDs( return ops } -// shallowCopy creates a shallow copy of the passed state. Importantly, it -// retains copies to the same underlying nodes while allocating new backing -// slices. 
-func shallowCopy(cur scpb.State) scpb.State { - return scpb.State{ - Nodes: append( - make([]*scpb.Node, 0, len(cur.Nodes)), - cur.Nodes..., - ), - Statements: append( - make([]*scpb.Statement, 0, len(cur.Statements)), - cur.Statements..., - ), - Authorization: cur.Authorization, +func (bc buildContext) nodes(current []scpb.Status) []*screl.Node { + nodes := make([]*screl.Node, len(bc.targetState.Targets)) + for i, status := range current { + t := &bc.targetState.Targets[i] + n, ok := bc.g.GetNode(t, status) + if !ok { + panic(errors.AssertionFailedf("could not find node for element %s, target status %s, current status %s", + screl.ElementString(t.Element()), t.TargetStatus, status)) + } + nodes[i] = n } + return nodes } // decorateStages decorates stages with position in plan. diff --git a/pkg/sql/schemachanger/scplan/scstage/stage.go b/pkg/sql/schemachanger/scplan/internal/scstage/stage.go similarity index 66% rename from pkg/sql/schemachanger/scplan/scstage/stage.go rename to pkg/sql/schemachanger/scplan/internal/scstage/stage.go index 004536c0382f..884f02686df4 100644 --- a/pkg/sql/schemachanger/scplan/scstage/stage.go +++ b/pkg/sql/schemachanger/scplan/internal/scstage/stage.go @@ -13,22 +13,20 @@ package scstage import ( "fmt" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" + scgraph2 "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/errors" ) // A Stage is a sequence of ops to be executed "together" as part of a schema -// change. -// -// stages also contain the state before and after the execution of the ops in -// the stage, reflecting the fact that any set of ops can be thought of as a -// transition from one state to another. +// change. Stages also contain the statuses before and after the execution of +// the ops in the stage, reflecting the fact that any set of ops can be thought +// of as a transition from one state to another. type Stage struct { // Before and After are the states before and after the stage gets executed. - Before, After scpb.State + Before, After []scpb.Status // EdgeOps and ExtraOps are the collected ops in this stage: // - EdgeOps contains the ops originating from op-edges, that is to say, state @@ -74,15 +72,16 @@ func (s Stage) String() string { } // ValidateStages checks that the plan is valid. -func ValidateStages(stages []Stage, g *scgraph.Graph) error { +func ValidateStages(ts scpb.TargetState, stages []Stage, g *scgraph2.Graph) error { if len(stages) == 0 { return nil } // Check that each stage has internally-consistent states. for _, stage := range stages { - if err := validateInternalStageStates(stage); err != nil { - return errors.Wrapf(err, "%s", stage) + if na, nb := len(stage.After), len(stage.Before); na != nb { + return errors.Errorf("%s: Before state has %d nodes and After state has %d nodes", + stage, nb, na) } } @@ -97,11 +96,12 @@ func ValidateStages(stages []Stage, g *scgraph.Graph) error { } // Check that the final state is valid. 
- final := stages[len(stages)-1].After.Nodes - for i, node := range final { - if node.TargetStatus != node.Status { - return errors.Errorf("final status is %s instead of %s at index %d for adding %+v", - node.Status, node.TargetStatus, i, node.Element()) + final := stages[len(stages)-1].After + for i, actual := range final { + expected := ts.Targets[i].TargetStatus + if actual != expected { + return errors.Errorf("final status is %s instead of %s at index %d for adding %s", + actual, expected, i, screl.ElementString(ts.Targets[i].Element())) } } @@ -116,62 +116,31 @@ func ValidateStages(stages []Stage, g *scgraph.Graph) error { // Check stage internal subgraph consistency. for _, stage := range stages { - if err := validateStageSubgraph(stage, g); err != nil { + if err := validateStageSubgraph(ts, stage, g); err != nil { return errors.Wrapf(err, "%s", stage.String()) } } return nil } -func validateInternalStageStates(stage Stage) error { - before := stage.Before.Nodes - after := stage.After.Nodes - if na, nb := len(after), len(before); na != nb { - return errors.Errorf("Before state has %d nodes and After state has %d nodes", - nb, na) - } - for j := range before { - beforeTarget, afterTarget := before[j].Target, after[j].Target - if ea, eb := afterTarget.Element(), beforeTarget.Element(); ea != eb { - return errors.Errorf("target at index %d has Before element %+v and After element %+v", - j, eb, ea) - } - if ta, tb := afterTarget.TargetStatus, beforeTarget.TargetStatus; ta != tb { - return errors.Errorf("target at index %d has Before status %s and After status %s", - j, tb, ta) - } - } - return nil -} - func validateAdjacentStagesStates(previous, next Stage) error { - after := previous.After.Nodes - before := next.Before.Nodes - if na, nb := len(after), len(before); na != nb { + if na, nb := len(previous.After), len(next.Before); na != nb { return errors.Errorf("node count mismatch: %d != %d", na, nb) } - for j, beforeNode := range before { - afterNode := after[j] - if sa, sb := afterNode.Status, beforeNode.Status; sa != sb { + for j, before := range next.Before { + after := previous.After[j] + if before != after { return errors.Errorf("node status mismatch at index %d: %s != %s", - j, afterNode.Status.String(), beforeNode.Status.String()) - } - if ta, tb := afterNode.TargetStatus, beforeNode.TargetStatus; ta != tb { - return errors.Errorf("target status mismatch at index %d: %s != %s", - j, ta.String(), tb.String()) - } - if ea, eb := afterNode.Element(), beforeNode.Element(); ea != eb { - return errors.Errorf("target element mismatch at index %d: %+v != %+v", - j, ea, eb) + j, after.String(), before.String()) } } return nil } -func validateStageSubgraph(stage Stage, g *scgraph.Graph) error { +func validateStageSubgraph(ts scpb.TargetState, stage Stage, g *scgraph2.Graph) error { // Transform the ops in a non-repeating sequence of their original op edges. - var queue []*scgraph.OpEdge + var queue []*scgraph2.OpEdge for _, op := range stage.EdgeOps { oe := g.GetOpEdgeFromOp(op) if oe == nil { @@ -185,16 +154,25 @@ func validateStageSubgraph(stage Stage, g *scgraph.Graph) error { // Build the initial set of fulfilled nodes by traversing the graph // recursively and backwards. - fulfilled := map[*scpb.Node]bool{} - current := append([]*scpb.Node{}, stage.Before.Nodes...) + fulfilled := map[*screl.Node]bool{} + current := make([]*screl.Node, len(ts.Targets)) + for i, status := range stage.Before { + t := &ts.Targets[i] + n, ok := g.GetNode(t, status) + if !ok { + // This shouldn't happen. 
+ return errors.Errorf("cannot find starting node for %s", screl.ElementString(t.Element())) + } + current[i] = n + } { - edgesTo := make(map[*scpb.Node][]scgraph.Edge, g.Order()) - _ = g.ForEachEdge(func(e scgraph.Edge) error { + edgesTo := make(map[*screl.Node][]scgraph2.Edge, g.Order()) + _ = g.ForEachEdge(func(e scgraph2.Edge) error { edgesTo[e.To()] = append(edgesTo[e.To()], e) return nil }) - var dfs func(n *scpb.Node) - dfs = func(n *scpb.Node) { + var dfs func(n *screl.Node) + dfs = func(n *screl.Node) { if _, found := fulfilled[n]; found { return } @@ -217,20 +195,20 @@ func validateStageSubgraph(stage Stage, g *scgraph.Graph) error { hasProgressed = false // Try to make progress for each target. for i, n := range current { - if n.Status == stage.After.Nodes[i].Status { + if n.CurrentStatus == stage.After[i] { // We're done for this target. continue } oe, ok := g.GetOpEdgeFrom(n) if !ok { // This shouldn't happen. - return errors.Errorf("cannot find op-edge path from %s to %s", - screl.NodeString(stage.Before.Nodes[i]), screl.NodeString(stage.After.Nodes[i])) + return errors.Errorf("cannot find op-edge path from %s to %s for %s", + stage.Before[i], stage.After[i], screl.ElementString(ts.Targets[i].Element())) } // Prevent making progress on this target if there are unmet dependencies. var hasUnmetDeps bool - if err := g.ForEachDepEdgeTo(oe.To(), func(de *scgraph.DepEdge) error { + if err := g.ForEachDepEdgeTo(oe.To(), func(de *scgraph2.DepEdge) error { hasUnmetDeps = hasUnmetDeps || !fulfilled[de.From()] return nil }); err != nil { @@ -255,9 +233,10 @@ func validateStageSubgraph(stage Stage, g *scgraph.Graph) error { } // When we stop making progress we expect to have reached the After state. for i, n := range current { - if n != stage.After.Nodes[i] { - return errors.Errorf("internal inconsistency, ended in non-terminal node %s after walking the graph", - screl.NodeString(stage.After.Nodes[i])) + if n.CurrentStatus != stage.After[i] { + return errors.Errorf("internal inconsistency, "+ + "ended in non-terminal status %s after walking the graph towards %s for %s", + n.CurrentStatus, stage.After[i], screl.ElementString(ts.Targets[i].Element())) } } diff --git a/pkg/sql/schemachanger/scplan/plan.go b/pkg/sql/schemachanger/scplan/plan.go index e4b10a6d8d33..c1ba5f4efa44 100644 --- a/pkg/sql/schemachanger/scplan/plan.go +++ b/pkg/sql/schemachanger/scplan/plan.go @@ -12,13 +12,14 @@ package scplan import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/deprules" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/opgen" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scopt" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scstage" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/deprules" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/opgen" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraphviz" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scopt" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scstage" "github.com/cockroachdb/errors" ) @@ -32,14 +33,23 @@ type Params 
struct { SchemaChangerJobIDSupplier func() jobspb.JobID } +// Exported internal types +type ( + // Graph is an exported alias of scgraph.Graph. + Graph = scgraph.Graph + + // Stage is an exported alias of scstage.Stage. + Stage = scstage.Stage +) + // A Plan is a schema change plan, primarily containing ops to be executed that // are partitioned into stages. type Plan struct { - Params Params - Initial scpb.State - Graph *scgraph.Graph - JobID jobspb.JobID - Stages []scstage.Stage + scpb.CurrentState + Params Params + Graph *scgraph.Graph + JobID jobspb.JobID + Stages []Stage } // StagesForCurrentPhase returns the stages in the execution phase specified in @@ -53,14 +63,29 @@ func (p Plan) StagesForCurrentPhase() []scstage.Stage { return p.Stages } +// DecorateErrorWithPlanDetails adds plan graphviz URLs as error details. +func (p Plan) DecorateErrorWithPlanDetails(err error) error { + return scgraphviz.DecorateErrorWithPlanDetails(err, p.CurrentState, p.Graph, p.Stages) +} + +// DependenciesURL returns a URL to render the dependency graph in the Plan. +func (p Plan) DependenciesURL() (string, error) { + return scgraphviz.DependenciesURL(p.CurrentState, p.Graph) +} + +// StagesURL returns a URL to render the stages in the Plan. +func (p Plan) StagesURL() (string, error) { + return scgraphviz.StagesURL(p.CurrentState, p.Graph, p.Stages) +} + // MakePlan generates a Plan for a particular phase of a schema change, given // the initial state for a set of targets. // Returns an error when planning fails. It is up to the caller to wrap this // error as an assertion failure and with useful debug information details. -func MakePlan(initial scpb.State, params Params) (p Plan, err error) { +func MakePlan(initial scpb.CurrentState, params Params) (p Plan, err error) { p = Plan{ - Initial: initial, - Params: params, + CurrentState: initial, + Params: params, } defer func() { if r := recover(); r != nil { @@ -68,24 +93,24 @@ func MakePlan(initial scpb.State, params Params) (p Plan, err error) { if !ok { rAsErr = errors.Errorf("panic during MakePlan: %v", r) } - err = errors.CombineErrors(err, rAsErr) + err = p.DecorateErrorWithPlanDetails(rAsErr) } }() - p.Graph = buildGraph(initial) + p.Graph = buildGraph(p.CurrentState) p.Stages = scstage.BuildStages(initial, params.ExecutionPhase, p.Graph, params.SchemaChangerJobIDSupplier) if n := len(p.Stages); n > 0 && p.Stages[n-1].Phase > scop.PreCommitPhase { // Only get the job ID if it's actually been assigned already. 
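The exported `Graph = scgraph.Graph` and `Stage = scstage.Stage` declarations above use Go type aliases to re-export types that now live under `internal/` packages. A small stand-alone illustration of the pattern, using a standard-library type in place of the scplan internals (so `Buffer` here is purely illustrative):

```
package main

import (
	"bytes"
	"fmt"
)

// Buffer is an exported alias in the same spirit as `Graph = scgraph.Graph`:
// it is the identical type, not a wrapper, so values flow freely between code
// that names the alias and code that names the original.
type Buffer = bytes.Buffer

func main() {
	var b Buffer
	b.WriteString("alias and original are interchangeable")
	var same *bytes.Buffer = &b // no conversion needed: same type
	fmt.Println(same.String())
}
```

Because an alias introduces no new type identity, callers outside the internal packages can hold and pass these values without import access to the internal package itself.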
p.JobID = params.SchemaChangerJobIDSupplier() } - if err = scstage.ValidateStages(p.Stages, p.Graph); err != nil { + if err := scstage.ValidateStages(p.TargetState, p.Stages, p.Graph); err != nil { panic(errors.Wrapf(err, "invalid execution plan")) } return p, nil } -func buildGraph(initial scpb.State) *scgraph.Graph { - g, err := opgen.BuildGraph(initial) +func buildGraph(cs scpb.CurrentState) *scgraph.Graph { + g, err := opgen.BuildGraph(cs) if err != nil { panic(errors.Wrapf(err, "build graph op edges")) } diff --git a/pkg/sql/schemachanger/scplan/plan_test.go b/pkg/sql/schemachanger/scplan/plan_test.go index 5fc5bb05c573..dc1543316d78 100644 --- a/pkg/sql/schemachanger/scplan/plan_test.go +++ b/pkg/sql/schemachanger/scplan/plan_test.go @@ -25,12 +25,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps/sctestutils" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraph" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scstage" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraphviz" + "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scstage" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -94,7 +94,7 @@ func TestPlanDataDriven(t *testing.T) { sctestutils.WithBuilderDependenciesFromTestServer(s, func(deps scbuild.Dependencies) { stmts, err := parser.Parse(d.Input) require.NoError(t, err) - var state scpb.State + var state scpb.CurrentState for i := range stmts { state, err = scbuild.Build(ctx, deps, state, stmts[i].AST) require.NoError(t, err) @@ -105,7 +105,7 @@ func TestPlanDataDriven(t *testing.T) { }) if d.Cmd == "ops" { - return marshalOps(t, plan.Stages) + return marshalOps(t, plan.TargetState, plan.Stages) } return marshalDeps(t, &plan) case "unimplemented": @@ -117,7 +117,7 @@ func TestPlanDataDriven(t *testing.T) { stmt := stmts[0] alter, ok := stmt.AST.(*tree.AlterTable) require.Truef(t, ok, "not an ALTER TABLE statement: %s", stmt.SQL) - _, err = scbuild.Build(ctx, deps, scpb.State{}, alter) + _, err = scbuild.Build(ctx, deps, scpb.CurrentState{}, alter) require.Truef(t, scerrors.HasNotImplemented(err), "expected unimplemented, got %v", err) }) return "" @@ -149,9 +149,13 @@ func validatePlan(t *testing.T, plan *scplan.Plan) { } expected[j] = s } - e := marshalOps(t, expected) - truncatedPlan := sctestutils.MakePlan(t, stage.Before, stage.Phase) - a := marshalOps(t, truncatedPlan.Stages) + e := marshalOps(t, plan.TargetState, expected) + cs := scpb.CurrentState{ + TargetState: plan.TargetState, + Current: stage.Before, + } + truncatedPlan := sctestutils.MakePlan(t, cs, stage.Phase) + a := marshalOps(t, plan.TargetState, truncatedPlan.Stages) require.Equalf(t, e, a, "plan mismatch when re-planning %d stage(s) later", i) } } @@ -173,13 +177,13 @@ func indentText(input string, tab string) string { // marshalDeps marshals dependencies in scplan.Plan to a string. 
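MakePlan relies on a deferred recover to turn panics from the planning internals into an ordinary error return (now additionally decorated with plan details). A self-contained sketch of that defer/recover shape, under simplified assumptions; `makePlan` and its `fail` parameter are hypothetical names for illustration only:

```
package main

import (
	"errors"
	"fmt"
)

// makePlan mirrors the shape used by MakePlan: internal helpers may panic,
// and the deferred closure converts the panic into the function's named
// error return (the real code also attaches graphviz URLs to the error).
func makePlan(fail bool) (err error) {
	defer func() {
		if r := recover(); r != nil {
			rAsErr, ok := r.(error)
			if !ok {
				rAsErr = fmt.Errorf("panic during makePlan: %v", r)
			}
			err = rAsErr
		}
	}()
	if fail {
		panic(errors.New("invalid execution plan"))
	}
	return nil
}

func main() {
	fmt.Println(makePlan(false)) // <nil>
	fmt.Println(makePlan(true))  // invalid execution plan
}
```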
func marshalDeps(t *testing.T, plan *scplan.Plan) string { var sortedDeps []string - err := plan.Graph.ForEachNode(func(n *scpb.Node) error { + err := plan.Graph.ForEachNode(func(n *screl.Node) error { return plan.Graph.ForEachDepEdgeFrom(n, func(de *scgraph.DepEdge) error { var deps strings.Builder fmt.Fprintf(&deps, "- from: [%s, %s]\n", - screl.ElementString(de.From().Element()), de.From().Status) + screl.ElementString(de.From().Element()), de.From().CurrentStatus) fmt.Fprintf(&deps, " to: [%s, %s]\n", - screl.ElementString(de.To().Element()), de.To().Status) + screl.ElementString(de.To().Element()), de.To().CurrentStatus) fmt.Fprintf(&deps, " kind: %s\n", de.Kind()) fmt.Fprintf(&deps, " rule: %s\n", de.Name()) sortedDeps = append(sortedDeps, deps.String()) @@ -200,19 +204,22 @@ func marshalDeps(t *testing.T, plan *scplan.Plan) string { } // marshalOps marshals operations in scplan.Plan to a string. -func marshalOps(t *testing.T, stages []scstage.Stage) string { +func marshalOps(t *testing.T, ts scpb.TargetState, stages []scstage.Stage) string { var sb strings.Builder for _, stage := range stages { sb.WriteString(stage.String()) sb.WriteString("\n transitions:\n") var transitionsBuf strings.Builder - for i := range stage.Before.Nodes { - before, after := stage.Before.Nodes[i], stage.After.Nodes[i] + for i, before := range stage.Before { + after := stage.After[i] if before == after { continue } - _, _ = fmt.Fprintf(&transitionsBuf, "%s -> %s\n", - screl.NodeString(before), after.Status) + n := &screl.Node{ + Target: &ts.Targets[i], + CurrentStatus: before, + } + _, _ = fmt.Fprintf(&transitionsBuf, "%s -> %s\n", screl.NodeString(n), after) } sb.WriteString(indentText(transitionsBuf.String(), " ")) ops := stage.Ops() diff --git a/pkg/sql/schemachanger/scplan/testdata/alter_table b/pkg/sql/schemachanger/scplan/testdata/alter_table index 07768189c944..84e36dc82dd3 100644 --- a/pkg/sql/schemachanger/scplan/testdata/alter_table +++ b/pkg/sql/schemachanger/scplan/testdata/alter_table @@ -21,9 +21,10 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops PgAttributeNum: 2 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 2 familyName: primary nullable: true @@ -33,12 +34,11 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -56,11 +56,13 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 + statementtag: ALTER TABLE PostCommitPhase stage 1 of 4 with 3 MutationType ops transitions: [[Column:{DescID: 54, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -168,9 +170,10 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops PgAttributeNum: 2 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 2 defaultExpr: 123:::INT8 familyName: primary @@ -181,12 +184,11 @@ PreCommitPhase stage 1 of 1 with 5 
MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 DEFAULT 123 - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 DEFAULT ‹123› + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -204,11 +206,14 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 DEFAULT 123 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 DEFAULT + ‹123› + statementtag: ALTER TABLE PostCommitPhase stage 1 of 4 with 3 MutationType ops transitions: [[Column:{DescID: 54, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -318,9 +323,10 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops PgAttributeNum: 2 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 2 defaultExpr: 123:::INT8 familyName: primary @@ -331,12 +337,11 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 DEFAULT 123 - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 DEFAULT ‹123› + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedColumnDeleteOnly ColumnID: 3 @@ -350,9 +355,10 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops PgAttributeNum: 3 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 3 defaultExpr: 456:::INT8 familyName: primary @@ -363,13 +369,12 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN k INT8 DEFAULT 456 - TargetMetadata: - SourceElementID: 1 - StatementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹k› INT8 DEFAULT ‹456› + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + StatementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -388,12 +393,18 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: ALTER TABLE defaultdb.foo ADD COLUMN j INT8 DEFAULT 123 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹j› INT8 DEFAULT + ‹123› + statementtag: ALTER TABLE - statement: ALTER TABLE defaultdb.foo ADD COLUMN k INT8 DEFAULT 456 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹k› INT8 DEFAULT + ‹456› + statementtag: ALTER TABLE PostCommitPhase stage 1 of 4 with 4 MutationType ops transitions: [[Column:{DescID: 54, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -514,9 +525,10 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops PgAttributeNum: 2 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 2 computerExpr: i + 1:::INT8 familyName: primary @@ -527,12 +539,12 @@ PreCommitPhase stage 1 of 1 with 5 
MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN a INT8 AS (i + 1) STORED - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹a› INT8 AS (‹i› + ‹1›) + STORED + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -550,11 +562,14 @@ PreCommitPhase stage 1 of 1 with 5 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: ALTER TABLE defaultdb.foo ADD COLUMN a INT8 AS (i + 1) STORED + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹a› INT8 AS + (‹i› + ‹1›) STORED + statementtag: ALTER TABLE PostCommitPhase stage 1 of 4 with 3 MutationType ops transitions: [[Column:{DescID: 54, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -669,9 +684,10 @@ PreCommitPhase stage 1 of 1 with 9 MutationType ops PgAttributeNum: 2 TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - column: + Column: columnId: 2 familyName: primary nullable: true @@ -681,12 +697,11 @@ PreCommitPhase stage 1 of 1 with 9 MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.foo ADD COLUMN a INT8 - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹a› INT8 + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -710,9 +725,10 @@ PreCommitPhase stage 1 of 1 with 9 MutationType ops PgAttributeNum: 3 TableID: 55 *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - column: + Column: columnId: 3 familyName: primary nullable: true @@ -722,13 +738,12 @@ PreCommitPhase stage 1 of 1 with 9 MutationType ops family: IntFamily oid: 20 width: 64 - Metadata: - Statement: ALTER TABLE defaultdb.bar ADD COLUMN b INT8 - TargetMetadata: - SourceElementID: 1 - StatementID: 1 - SubWorkID: 1 - Username: root + Statement: ALTER TABLE ‹defaultdb›.public.‹bar› ADD COLUMN ‹b› INT8 + StatementTag: ALTER TABLE + TargetMetadata: + SourceElementID: 1 + StatementID: 1 + SubWorkID: 1 TargetStatus: 8 *scop.MakeAddedIndexDeleteOnly IndexID: 2 @@ -750,12 +765,16 @@ PreCommitPhase stage 1 of 1 with 9 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: ALTER TABLE defaultdb.foo ADD COLUMN a INT8 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹foo› ADD COLUMN ‹a› INT8 + statementtag: ALTER TABLE - statement: ALTER TABLE defaultdb.bar ADD COLUMN b INT8 + redactedstatement: ALTER TABLE ‹defaultdb›.public.‹bar› ADD COLUMN ‹b› INT8 + statementtag: ALTER TABLE PostCommitPhase stage 1 of 4 with 5 MutationType ops transitions: [[Column:{DescID: 54, ColumnID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY diff --git a/pkg/sql/schemachanger/scplan/testdata/create_index b/pkg/sql/schemachanger/scplan/testdata/create_index index f44f0f02aac3..35b6df92adf3 100644 --- a/pkg/sql/schemachanger/scplan/testdata/create_index +++ b/pkg/sql/schemachanger/scplan/testdata/create_index @@ -26,11 +26,14 @@ PreCommitPhase stage 1 of 1 with 3 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: 
+ TargetState: Authorization: - Username: root + UserName: root Statements: - statement: CREATE INDEX id1 ON defaultdb.t1 (id, name) STORING (money) + redactedstatement: CREATE INDEX ‹id1› ON ‹defaultdb›.public.‹t1› (‹id›, ‹name›) + STORING (‹money›) + statementtag: CREATE INDEX PostCommitPhase stage 1 of 4 with 2 MutationType ops transitions: [[SecondaryIndex:{DescID: 54, IndexID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -111,12 +114,15 @@ PreCommitPhase stage 1 of 1 with 3 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: CREATE INVERTED INDEX CONCURRENTLY id1 ON defaultdb.t1 (id, name) STORING (money) + redactedstatement: CREATE INVERTED INDEX CONCURRENTLY ‹id1› ON ‹defaultdb›.public.‹t1› + (‹id›, ‹name›) STORING (‹money›) + statementtag: CREATE INDEX PostCommitPhase stage 1 of 4 with 2 MutationType ops transitions: [[SecondaryIndex:{DescID: 54, IndexID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY @@ -206,12 +212,15 @@ PreCommitPhase stage 1 of 1 with 4 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: CREATE INDEX id1 ON defaultdb.t1 (id, name) STORING (money) PARTITION BY LIST (id) (PARTITION p1 VALUES IN (1)) + redactedstatement: CREATE INDEX ‹id1› ON ‹defaultdb›.public.‹t1› (‹id›, ‹name›) + STORING (‹money›) PARTITION BY LIST (‹id›) (PARTITION ‹p1› VALUES IN (‹1›)) + statementtag: CREATE INDEX PostCommitPhase stage 1 of 4 with 2 MutationType ops transitions: [[SecondaryIndex:{DescID: 54, IndexID: 2}, PUBLIC], DELETE_ONLY] -> DELETE_AND_WRITE_ONLY diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_database b/pkg/sql/schemachanger/scplan/testdata/drop_database index 32cb866468bb..0c586bcd7683 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_database +++ b/pkg/sql/schemachanger/scplan/testdata/drop_database @@ -49,59 +49,137 @@ CREATE VIEW db1.sc1.v5 AS (SELECT 'a'::db1.sc1.typ::string AS k, n2, n1 from db1 ops DROP DATABASE db1 CASCADE ---- -StatementPhase stage 1 of 1 with 14 MutationType ops +StatementPhase stage 1 of 1 with 36 MutationType ops transitions: [[Sequence:{DescID: 57}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 57}, ABSENT], PUBLIC] -> ABSENT [[Table:{DescID: 60}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 60}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 60, ColumnID: 1}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 60, ColumnID: 1}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 60, ColumnID: 2}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 60, ColumnID: 2}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 60, ColumnID: 3}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 60, ColumnID: 3}, ABSENT], PUBLIC] -> ABSENT [[PrimaryIndex:{DescID: 60, IndexID: 1}, ABSENT], PUBLIC] -> VALIDATED + [[ConstraintComment:{DescID: 60, ConstraintType: PrimaryKey, Name: t1_pkey}, ABSENT], PUBLIC] -> ABSENT + [[IndexComment:{DescID: 60, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT [[Schema:{DescID: 55}, ABSENT], PUBLIC] -> TXN_DROPPED + [[SchemaComment:{DescID: 55}, ABSENT], PUBLIC] -> ABSENT [[Sequence:{DescID: 58}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 58}, ABSENT], PUBLIC] -> ABSENT [[Table:{DescID: 59}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 59}, ABSENT], PUBLIC] -> ABSENT 
[[Column:{DescID: 59, ColumnID: 1}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 59, ColumnID: 1}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 59, ColumnID: 2}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 59, ColumnID: 2}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 59, ColumnID: 3}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 59, ColumnID: 3}, ABSENT], PUBLIC] -> ABSENT [[PrimaryIndex:{DescID: 59, IndexID: 1}, ABSENT], PUBLIC] -> VALIDATED + [[ConstraintComment:{DescID: 59, ConstraintType: PrimaryKey, Name: t1_pkey}, ABSENT], PUBLIC] -> ABSENT + [[IndexComment:{DescID: 59, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 61}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 61}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 62}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 62}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 63}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 63}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 64}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 64}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 67}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 67}, ABSENT], PUBLIC] -> ABSENT [[Type:{DescID: 65}, ABSENT], PUBLIC] -> TXN_DROPPED [[Type:{DescID: 66}, ABSENT], PUBLIC] -> TXN_DROPPED [[Schema:{DescID: 56}, ABSENT], PUBLIC] -> TXN_DROPPED + [[SchemaComment:{DescID: 56}, ABSENT], PUBLIC] -> ABSENT [[Database:{DescID: 54}, ABSENT], PUBLIC] -> TXN_DROPPED + [[DatabaseComment:{DescID: 54}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 57 + *scop.RemoveTableComment + TableID: 57 *scop.MarkDescriptorAsDroppedSynthetically DescID: 60 + *scop.RemoveTableComment + TableID: 60 + *scop.RemoveColumnComment + ColumnID: 1 + TableID: 60 + *scop.RemoveColumnComment + ColumnID: 2 + TableID: 60 + *scop.RemoveColumnComment + ColumnID: 3 + TableID: 60 + *scop.RemoveConstraintComment + ConstraintName: t1_pkey + ConstraintType: 4 + TableID: 60 + *scop.RemoveIndexComment + IndexID: 1 + TableID: 60 *scop.MarkDescriptorAsDroppedSynthetically DescID: 55 + *scop.RemoveSchemaComment + SchemaID: 55 *scop.MarkDescriptorAsDroppedSynthetically DescID: 58 + *scop.RemoveTableComment + TableID: 58 *scop.MarkDescriptorAsDroppedSynthetically DescID: 59 + *scop.RemoveTableComment + TableID: 59 + *scop.RemoveColumnComment + ColumnID: 1 + TableID: 59 + *scop.RemoveColumnComment + ColumnID: 2 + TableID: 59 + *scop.RemoveColumnComment + ColumnID: 3 + TableID: 59 + *scop.RemoveConstraintComment + ConstraintName: t1_pkey + ConstraintType: 4 + TableID: 59 + *scop.RemoveIndexComment + IndexID: 1 + TableID: 59 *scop.MarkDescriptorAsDroppedSynthetically DescID: 61 + *scop.RemoveTableComment + TableID: 61 *scop.MarkDescriptorAsDroppedSynthetically DescID: 62 + *scop.RemoveTableComment + TableID: 62 *scop.MarkDescriptorAsDroppedSynthetically DescID: 63 + *scop.RemoveTableComment + TableID: 63 *scop.MarkDescriptorAsDroppedSynthetically DescID: 64 + *scop.RemoveTableComment + TableID: 64 *scop.MarkDescriptorAsDroppedSynthetically DescID: 67 + *scop.RemoveTableComment + TableID: 67 *scop.MarkDescriptorAsDroppedSynthetically DescID: 65 *scop.MarkDescriptorAsDroppedSynthetically DescID: 66 *scop.MarkDescriptorAsDroppedSynthetically DescID: 56 + *scop.RemoveSchemaComment + SchemaID: 56 *scop.MarkDescriptorAsDroppedSynthetically DescID: 54 + *scop.RemoveDatabaseComment + DatabaseID: 54 PreCommitPhase stage 1 of 1 with 56 MutationType ops transitions: 
[[Sequence:{DescID: 57}, ABSENT], TXN_DROPPED] -> DROPPED @@ -337,11 +415,13 @@ PreCommitPhase stage 1 of 1 with 56 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP DATABASE db1 CASCADE + redactedstatement: DROP DATABASE ‹db1› CASCADE + statementtag: DROP DATABASE PostCommitNonRevertiblePhase stage 1 of 1 with 46 MutationType ops transitions: [[Sequence:{DescID: 57}, ABSENT], DROPPED] -> ABSENT @@ -368,184 +448,185 @@ PostCommitNonRevertiblePhase stage 1 of 1 with 46 MutationType ops [[Database:{DescID: 54}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 57 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 57 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 3 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 3 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 57 *scop.LogEvent - DescID: 60 + Authorization: + UserName: root Element: - table: + Table: tableId: 60 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 3 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 3 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 60 *scop.DrainDescriptorName TableID: 55 *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - schema: + Schema: dependentObjects: - 57 - 60 schemaId: 55 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 55 *scop.LogEvent - DescID: 58 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 58 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 58 *scop.LogEvent - DescID: 59 + Authorization: + UserName: root Element: - table: + Table: tableId: 59 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 59 *scop.LogEvent - DescID: 61 + Authorization: + UserName: root Element: - view: + View: tableId: 61 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 8 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 8 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 61 *scop.LogEvent - DescID: 62 + Authorization: + UserName: root Element: - view: + View: tableId: 62 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 9 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 9 + SubWorkID: 1 TargetStatus: 1 
*scop.CreateGcJobForTable TableID: 62 *scop.LogEvent - DescID: 63 + Authorization: + UserName: root Element: - view: + View: tableId: 63 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 10 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 10 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 63 *scop.LogEvent - DescID: 64 + Authorization: + UserName: root Element: - view: + View: tableId: 64 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 10 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 10 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 64 *scop.LogEvent - DescID: 67 + Authorization: + UserName: root Element: - view: + View: tableId: 67 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 12 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 12 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 67 *scop.LogEvent - DescID: 65 + Authorization: + UserName: root Element: - type: + Type: typeId: 65 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 65 *scop.LogEvent - DescID: 66 + Authorization: + UserName: root Element: - type: + Type: typeId: 66 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 66 *scop.DrainDescriptorName TableID: 56 *scop.LogEvent - DescID: 56 + Authorization: + UserName: root Element: - schema: + Schema: dependentObjects: - 58 - 59 @@ -557,31 +638,30 @@ PostCommitNonRevertiblePhase stage 1 of 1 with 46 MutationType ops - 66 - 67 schemaId: 56 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 56 *scop.DrainDescriptorName TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - database: + Database: databaseId: 54 dependentObjects: - 55 - 56 - Metadata: - Statement: DROP DATABASE db1 CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP DATABASE ‹db1› CASCADE + StatementTag: DROP DATABASE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForDatabase DatabaseID: 54 diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_schema b/pkg/sql/schemachanger/scplan/testdata/drop_schema index 43e824b52b6c..f84351c9dc7b 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_schema +++ b/pkg/sql/schemachanger/scplan/testdata/drop_schema @@ -393,43 +393,88 @@ DROP SCHEMA defaultdb.SC1 CASCADE ops DROP SCHEMA defaultdb.SC1 CASCADE ---- -StatementPhase stage 1 of 1 with 10 MutationType ops +StatementPhase stage 1 of 1 with 
23 MutationType ops transitions: [[Sequence:{DescID: 55}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 55}, ABSENT], PUBLIC] -> ABSENT [[Table:{DescID: 56}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 56}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 56, ColumnID: 1}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 56, ColumnID: 1}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 56, ColumnID: 2}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 56, ColumnID: 2}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 56, ColumnID: 3}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 56, ColumnID: 3}, ABSENT], PUBLIC] -> ABSENT [[PrimaryIndex:{DescID: 56, IndexID: 1}, ABSENT], PUBLIC] -> VALIDATED + [[ConstraintComment:{DescID: 56, ConstraintType: PrimaryKey, Name: t1_pkey}, ABSENT], PUBLIC] -> ABSENT + [[IndexComment:{DescID: 56, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 57}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 57}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 58}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 58}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 59}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 59}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 60}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 60}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 63}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 63}, ABSENT], PUBLIC] -> ABSENT [[Type:{DescID: 61}, ABSENT], PUBLIC] -> TXN_DROPPED [[Type:{DescID: 62}, ABSENT], PUBLIC] -> TXN_DROPPED [[Schema:{DescID: 54}, ABSENT], PUBLIC] -> TXN_DROPPED + [[SchemaComment:{DescID: 54}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 55 + *scop.RemoveTableComment + TableID: 55 *scop.MarkDescriptorAsDroppedSynthetically DescID: 56 + *scop.RemoveTableComment + TableID: 56 + *scop.RemoveColumnComment + ColumnID: 1 + TableID: 56 + *scop.RemoveColumnComment + ColumnID: 2 + TableID: 56 + *scop.RemoveColumnComment + ColumnID: 3 + TableID: 56 + *scop.RemoveConstraintComment + ConstraintName: t1_pkey + ConstraintType: 4 + TableID: 56 + *scop.RemoveIndexComment + IndexID: 1 + TableID: 56 *scop.MarkDescriptorAsDroppedSynthetically DescID: 57 + *scop.RemoveTableComment + TableID: 57 *scop.MarkDescriptorAsDroppedSynthetically DescID: 58 + *scop.RemoveTableComment + TableID: 58 *scop.MarkDescriptorAsDroppedSynthetically DescID: 59 + *scop.RemoveTableComment + TableID: 59 *scop.MarkDescriptorAsDroppedSynthetically DescID: 60 + *scop.RemoveTableComment + TableID: 60 *scop.MarkDescriptorAsDroppedSynthetically DescID: 63 + *scop.RemoveTableComment + TableID: 63 *scop.MarkDescriptorAsDroppedSynthetically DescID: 61 *scop.MarkDescriptorAsDroppedSynthetically DescID: 62 *scop.MarkDescriptorAsDroppedSynthetically DescID: 54 + *scop.RemoveSchemaComment + SchemaID: 54 PreCommitPhase stage 1 of 1 with 43 MutationType ops transitions: [[Sequence:{DescID: 55}, ABSENT], TXN_DROPPED] -> DROPPED @@ -610,11 +655,13 @@ PreCommitPhase stage 1 of 1 with 43 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP SCHEMA defaultdb.sc1 CASCADE + redactedstatement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + statementtag: DROP SCHEMA PostCommitNonRevertiblePhase stage 1 of 1 with 33 MutationType ops transitions: [[Sequence:{DescID: 55}, ABSENT], DROPPED] -> ABSENT @@ -633,137 +680,138 @@ 
PostCommitNonRevertiblePhase stage 1 of 1 with 33 MutationType ops [[Schema:{DescID: 54}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 55 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 55 *scop.LogEvent - DescID: 56 + Authorization: + UserName: root Element: - table: + Table: tableId: 56 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 56 *scop.LogEvent - DescID: 57 + Authorization: + UserName: root Element: - view: + View: tableId: 57 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 4 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 4 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 57 *scop.LogEvent - DescID: 58 + Authorization: + UserName: root Element: - view: + View: tableId: 58 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 5 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 5 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 58 *scop.LogEvent - DescID: 59 + Authorization: + UserName: root Element: - view: + View: tableId: 59 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 59 *scop.LogEvent - DescID: 60 + Authorization: + UserName: root Element: - view: + View: tableId: 60 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 6 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 6 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 60 *scop.LogEvent - DescID: 63 + Authorization: + UserName: root Element: - view: + View: tableId: 63 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 8 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 8 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 63 *scop.LogEvent - DescID: 61 + Authorization: + UserName: root Element: - type: + Type: typeId: 61 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 61 *scop.LogEvent - DescID: 62 + Authorization: + UserName: root Element: - type: + Type: 
typeId: 62 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 62 *scop.DrainDescriptorName TableID: 54 *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - schema: + Schema: dependentObjects: - 55 - 56 @@ -775,12 +823,11 @@ PostCommitNonRevertiblePhase stage 1 of 1 with 33 MutationType ops - 62 - 63 schemaId: 54 - Metadata: - Statement: DROP SCHEMA defaultdb.sc1 CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP SCHEMA ‹defaultdb›.‹sc1› CASCADE + StatementTag: DROP SCHEMA + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 54 diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_sequence b/pkg/sql/schemachanger/scplan/testdata/drop_sequence index 45c5136ff791..cf618c30b2f0 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_sequence +++ b/pkg/sql/schemachanger/scplan/testdata/drop_sequence @@ -5,12 +5,15 @@ CREATE SEQUENCE defaultdb.SQ1 ops DROP SEQUENCE defaultdb.SQ1 CASCADE ---- -StatementPhase stage 1 of 1 with 1 MutationType ops +StatementPhase stage 1 of 1 with 2 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 54}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 54 + *scop.RemoveTableComment + TableID: 54 PreCommitPhase stage 1 of 1 with 4 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], TXN_DROPPED] -> DROPPED @@ -30,26 +33,28 @@ PreCommitPhase stage 1 of 1 with 4 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP SEQUENCE defaultdb.sq1 CASCADE + redactedstatement: DROP SEQUENCE ‹defaultdb›.public.‹sq1› CASCADE + statementtag: DROP SEQUENCE PostCommitNonRevertiblePhase stage 1 of 1 with 4 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 54 - Metadata: - Statement: DROP SEQUENCE defaultdb.sq1 CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP SEQUENCE ‹defaultdb›.public.‹sq1› CASCADE + StatementTag: DROP SEQUENCE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 54 @@ -71,12 +76,15 @@ CREATE TABLE defaultdb.blog_posts2 (id INT8 PRIMARY KEY, val INT8 DEFAULT nextva ops DROP SEQUENCE defaultdb.SQ1 CASCADE ---- -StatementPhase stage 1 of 1 with 1 MutationType ops +StatementPhase stage 1 of 1 with 2 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 54}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 54 + *scop.RemoveTableComment + TableID: 54 PreCommitPhase stage 1 of 1 with 12 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], TXN_DROPPED] -> DROPPED @@ -122,26 +130,28 @@ PreCommitPhase stage 1 of 1 with 12 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP SEQUENCE defaultdb.sq1 CASCADE + 
redactedstatement: DROP SEQUENCE ‹defaultdb›.public.‹sq1› CASCADE + statementtag: DROP SEQUENCE PostCommitNonRevertiblePhase stage 1 of 1 with 6 MutationType ops transitions: [[Sequence:{DescID: 54}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 54 - Metadata: - Statement: DROP SEQUENCE defaultdb.sq1 CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP SEQUENCE ‹defaultdb›.public.‹sq1› CASCADE + StatementTag: DROP SEQUENCE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 54 diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_table b/pkg/sql/schemachanger/scplan/testdata/drop_table index b8057c545b59..e7fb48d4cf43 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_table +++ b/pkg/sql/schemachanger/scplan/testdata/drop_table @@ -38,24 +38,72 @@ CREATE VIEW v1 AS (SELECT customer_id, carrier FROM defaultdb.shipments) ops DROP TABLE defaultdb.shipments CASCADE; ---- -StatementPhase stage 1 of 1 with 3 MutationType ops +StatementPhase stage 1 of 1 with 15 MutationType ops transitions: [[Table:{DescID: 57}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 57}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 57, ColumnID: 1}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 57, ColumnID: 1}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 57, ColumnID: 2}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 57, ColumnID: 2}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 57, ColumnID: 3}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 57, ColumnID: 3}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 57, ColumnID: 4}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 57, ColumnID: 4}, ABSENT], PUBLIC] -> ABSENT [[Column:{DescID: 57, ColumnID: 5}, ABSENT], PUBLIC] -> DELETE_AND_WRITE_ONLY + [[ColumnComment:{DescID: 57, ColumnID: 5}, ABSENT], PUBLIC] -> ABSENT [[PrimaryIndex:{DescID: 57, IndexID: 1}, ABSENT], PUBLIC] -> VALIDATED + [[ConstraintComment:{DescID: 57, ConstraintType: PrimaryKey, Name: shipments_pkey}, ABSENT], PUBLIC] -> ABSENT + [[IndexComment:{DescID: 57, IndexID: 1}, ABSENT], PUBLIC] -> ABSENT + [[ConstraintComment:{DescID: 57, ConstraintType: FK, Name: fk_customers}, ABSENT], PUBLIC] -> ABSENT + [[ConstraintComment:{DescID: 57, ConstraintType: FK, Name: fk_orders}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 59}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 59}, ABSENT], PUBLIC] -> ABSENT [[Sequence:{DescID: 58}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 58}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 57 + *scop.RemoveTableComment + TableID: 57 + *scop.RemoveColumnComment + ColumnID: 1 + TableID: 57 + *scop.RemoveColumnComment + ColumnID: 2 + TableID: 57 + *scop.RemoveColumnComment + ColumnID: 3 + TableID: 57 + *scop.RemoveColumnComment + ColumnID: 4 + TableID: 57 + *scop.RemoveColumnComment + ColumnID: 5 + TableID: 57 + *scop.RemoveConstraintComment + ConstraintName: shipments_pkey + ConstraintType: 4 + TableID: 57 + *scop.RemoveIndexComment + IndexID: 1 + TableID: 57 + *scop.RemoveConstraintComment + ConstraintName: fk_customers + ConstraintType: 3 + TableID: 57 + *scop.RemoveConstraintComment + ConstraintName: fk_orders + ConstraintType: 3 + TableID: 57 *scop.MarkDescriptorAsDroppedSynthetically DescID: 59 + *scop.RemoveTableComment + 
TableID: 59 *scop.MarkDescriptorAsDroppedSynthetically DescID: 58 + *scop.RemoveTableComment + TableID: 58 PreCommitPhase stage 1 of 1 with 22 MutationType ops transitions: [[Table:{DescID: 57}, ABSENT], TXN_DROPPED] -> DROPPED @@ -151,11 +199,13 @@ PreCommitPhase stage 1 of 1 with 22 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP TABLE defaultdb.shipments CASCADE + redactedstatement: DROP TABLE ‹defaultdb›.public.‹shipments› CASCADE + statementtag: DROP TABLE PostCommitNonRevertiblePhase stage 1 of 1 with 13 MutationType ops transitions: [[Table:{DescID: 57}, ABSENT], DROPPED] -> ABSENT @@ -169,44 +219,44 @@ PostCommitNonRevertiblePhase stage 1 of 1 with 13 MutationType ops [[Sequence:{DescID: 58}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 57 + Authorization: + UserName: root Element: - table: + Table: tableId: 57 - Metadata: - Statement: DROP TABLE defaultdb.shipments CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP TABLE ‹defaultdb›.public.‹shipments› CASCADE + StatementTag: DROP TABLE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 57 *scop.LogEvent - DescID: 59 + Authorization: + UserName: root Element: - view: + View: tableId: 59 - Metadata: - Statement: DROP TABLE defaultdb.shipments CASCADE - TargetMetadata: - SourceElementID: 3 - SubWorkID: 1 - Username: root + Statement: DROP TABLE ‹defaultdb›.public.‹shipments› CASCADE + StatementTag: DROP TABLE + TargetMetadata: + SourceElementID: 3 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 59 *scop.LogEvent - DescID: 58 + Authorization: + UserName: root Element: - sequence: + Sequence: sequenceId: 58 - Metadata: - Statement: DROP TABLE defaultdb.shipments CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP TABLE ‹defaultdb›.public.‹shipments› CASCADE + StatementTag: DROP TABLE + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 58 diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_type b/pkg/sql/schemachanger/scplan/testdata/drop_type index 86417d57e7db..e806e60ab117 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_type +++ b/pkg/sql/schemachanger/scplan/testdata/drop_type @@ -37,41 +37,43 @@ PreCommitPhase stage 1 of 1 with 7 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP TYPE defaultdb.typ + redactedstatement: DROP TYPE ‹defaultdb›.‹public›.‹typ› + statementtag: DROP TYPE PostCommitNonRevertiblePhase stage 1 of 1 with 7 MutationType ops transitions: [[Type:{DescID: 54}, ABSENT], DROPPED] -> ABSENT [[Type:{DescID: 55}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 54 + Authorization: + UserName: root Element: - type: + Type: typeId: 54 - Metadata: - Statement: DROP TYPE defaultdb.typ - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP TYPE ‹defaultdb›.‹public›.‹typ› + StatementTag: DROP TYPE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 54 *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - type: + Type: typeId: 55 - Metadata: - Statement: DROP TYPE defaultdb.typ - TargetMetadata: - SourceElementID: 1 - 
SubWorkID: 1 - Username: root + Statement: DROP TYPE ‹defaultdb›.‹public›.‹typ› + StatementTag: DROP TYPE + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.DeleteDescriptor DescriptorID: 55 diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_view b/pkg/sql/schemachanger/scplan/testdata/drop_view index 0b4d1e3e320e..75a723d3a3f0 100644 --- a/pkg/sql/schemachanger/scplan/testdata/drop_view +++ b/pkg/sql/schemachanger/scplan/testdata/drop_view @@ -9,12 +9,15 @@ CREATE VIEW defaultdb.v1 AS (SELECT name FROM defaultdb.t1) ops DROP VIEW defaultdb.v1 ---- -StatementPhase stage 1 of 1 with 1 MutationType ops +StatementPhase stage 1 of 1 with 2 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 55}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 55 + *scop.RemoveTableComment + TableID: 55 PreCommitPhase stage 1 of 1 with 6 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], TXN_DROPPED] -> DROPPED @@ -41,26 +44,28 @@ PreCommitPhase stage 1 of 1 with 6 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP VIEW defaultdb.v1 + redactedstatement: DROP VIEW ‹defaultdb›.public.‹v1› + statementtag: DROP VIEW PostCommitNonRevertiblePhase stage 1 of 1 with 5 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - view: + View: tableId: 55 - Metadata: - Statement: DROP VIEW defaultdb.v1 - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 55 @@ -133,24 +138,39 @@ CREATE VIEW v5 AS (SELECT 'a'::defaultdb.typ::string AS k, n2, n1 from defaultdb ops DROP VIEW defaultdb.v1 CASCADE ---- -StatementPhase stage 1 of 1 with 5 MutationType ops +StatementPhase stage 1 of 1 with 10 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 55}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 56}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 56}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 57}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 57}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 58}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 58}, ABSENT], PUBLIC] -> ABSENT [[View:{DescID: 61}, ABSENT], PUBLIC] -> TXN_DROPPED + [[TableComment:{DescID: 61}, ABSENT], PUBLIC] -> ABSENT ops: *scop.MarkDescriptorAsDroppedSynthetically DescID: 55 + *scop.RemoveTableComment + TableID: 55 *scop.MarkDescriptorAsDroppedSynthetically DescID: 56 + *scop.RemoveTableComment + TableID: 56 *scop.MarkDescriptorAsDroppedSynthetically DescID: 57 + *scop.RemoveTableComment + TableID: 57 *scop.MarkDescriptorAsDroppedSynthetically DescID: 58 + *scop.RemoveTableComment + TableID: 58 *scop.MarkDescriptorAsDroppedSynthetically DescID: 61 + *scop.RemoveTableComment + TableID: 61 PreCommitPhase stage 1 of 1 with 25 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], TXN_DROPPED] -> DROPPED @@ -261,11 +281,13 @@ PreCommitPhase stage 1 of 1 with 25 MutationType ops JobID: 1 *scop.CreateDeclarativeSchemaChangerJob JobID: 1 - State: + TargetState: Authorization: - Username: root + UserName: root Statements: - statement: DROP 
VIEW defaultdb.v1 CASCADE + redactedstatement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + statementtag: DROP VIEW PostCommitNonRevertiblePhase stage 1 of 1 with 17 MutationType ops transitions: [[View:{DescID: 55}, ABSENT], DROPPED] -> ABSENT @@ -275,72 +297,72 @@ PostCommitNonRevertiblePhase stage 1 of 1 with 17 MutationType ops [[View:{DescID: 61}, ABSENT], DROPPED] -> ABSENT ops: *scop.LogEvent - DescID: 55 + Authorization: + UserName: root Element: - view: + View: tableId: 55 - Metadata: - Statement: DROP VIEW defaultdb.v1 CASCADE - TargetMetadata: - SourceElementID: 1 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 1 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 55 *scop.LogEvent - DescID: 56 + Authorization: + UserName: root Element: - view: + View: tableId: 56 - Metadata: - Statement: DROP VIEW defaultdb.v1 CASCADE - TargetMetadata: - SourceElementID: 2 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 2 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 56 *scop.LogEvent - DescID: 57 + Authorization: + UserName: root Element: - view: + View: tableId: 57 - Metadata: - Statement: DROP VIEW defaultdb.v1 CASCADE - TargetMetadata: - SourceElementID: 3 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 3 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 57 *scop.LogEvent - DescID: 58 + Authorization: + UserName: root Element: - view: + View: tableId: 58 - Metadata: - Statement: DROP VIEW defaultdb.v1 CASCADE - TargetMetadata: - SourceElementID: 3 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 3 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 58 *scop.LogEvent - DescID: 61 + Authorization: + UserName: root Element: - view: + View: tableId: 61 - Metadata: - Statement: DROP VIEW defaultdb.v1 CASCADE - TargetMetadata: - SourceElementID: 5 - SubWorkID: 1 - Username: root + Statement: DROP VIEW ‹defaultdb›.public.‹v1› CASCADE + StatementTag: DROP VIEW + TargetMetadata: + SourceElementID: 5 + SubWorkID: 1 TargetStatus: 1 *scop.CreateGcJobForTable TableID: 61 diff --git a/pkg/sql/schemachanger/screl/BUILD.bazel b/pkg/sql/schemachanger/screl/BUILD.bazel index 2e2715e43a17..34dafcfffb3a 100644 --- a/pkg/sql/schemachanger/screl/BUILD.bazel +++ b/pkg/sql/schemachanger/screl/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "compare.go", "doc.go", "format.go", + "node.go", "scalars.go", ":gen-attr-stringer", # keep ], diff --git a/pkg/sql/schemachanger/screl/attr.go b/pkg/sql/schemachanger/screl/attr.go index b3887e1bd89e..cb7d7a37937e 100644 --- a/pkg/sql/schemachanger/screl/attr.go +++ b/pkg/sql/schemachanger/screl/attr.go @@ -56,10 +56,10 @@ const ( Name // IndexID is the index ID to which this element corresponds. IndexID - // TargetStatus is the Status of a Target. + // TargetStatus is the target status of an element. TargetStatus - // Status is the Status of a Node. - Status + // CurrentStatus is the current status of an element. + CurrentStatus // Element references an element. Element // Target is the reference from a node to a target. 
@@ -77,13 +77,11 @@ var t = reflect.TypeOf // Schema is the schema exported by this package covering the elements of scpb. var Schema = rel.MustSchema("screl", rel.AttrType(Element, t((*protoutil.Message)(nil)).Elem()), - rel.EntityMapping( - t((*scpb.Node)(nil)), - rel.EntityAttr(Status, "Status"), + rel.EntityMapping(t((*Node)(nil)), + rel.EntityAttr(CurrentStatus, "CurrentStatus"), rel.EntityAttr(Target, "Target"), ), - rel.EntityMapping( - t((*scpb.Target)(nil)), + rel.EntityMapping(t((*scpb.Target)(nil)), rel.EntityAttr(TargetStatus, "TargetStatus"), rel.EntityAttr(Element, elementProtoElementSelectors...), ), @@ -223,6 +221,28 @@ var Schema = rel.MustSchema("screl", rel.EntityAttr(DescID, "DatabaseID"), rel.EntityAttr(ReferencedDescID, "SchemaID"), ), + rel.EntityMapping(t((*scpb.TableComment)(nil)), + rel.EntityAttr(DescID, "TableID"), + ), + rel.EntityMapping(t((*scpb.DatabaseComment)(nil)), + rel.EntityAttr(DescID, "DatabaseID"), + ), + rel.EntityMapping(t((*scpb.SchemaComment)(nil)), + rel.EntityAttr(DescID, "SchemaID"), + ), + rel.EntityMapping(t((*scpb.ColumnComment)(nil)), + rel.EntityAttr(DescID, "TableID"), + rel.EntityAttr(ColumnID, "ColumnID"), + ), + rel.EntityMapping(t((*scpb.IndexComment)(nil)), + rel.EntityAttr(DescID, "TableID"), + rel.EntityAttr(IndexID, "IndexID"), + ), + rel.EntityMapping(t((*scpb.ConstraintComment)(nil)), + rel.EntityAttr(DescID, "TableID"), + rel.EntityAttr(Name, "ConstraintName"), + rel.EntityAttr(ConstraintType, "ConstraintType"), + ), ) // JoinTargetNode generates a clause that joins the target and node vars @@ -231,7 +251,7 @@ func JoinTargetNode(element, target, node rel.Var) rel.Clause { return rel.And( target.Type((*scpb.Target)(nil)), target.AttrEqVar(Element, element), - node.Type((*scpb.Node)(nil)), + node.Type((*Node)(nil)), node.AttrEqVar(Target, target), ) } diff --git a/pkg/sql/schemachanger/screl/attr_string.go b/pkg/sql/schemachanger/screl/attr_string.go index bb153ceb5422..5e89764bdbcd 100644 --- a/pkg/sql/schemachanger/screl/attr_string.go +++ b/pkg/sql/schemachanger/screl/attr_string.go @@ -14,7 +14,7 @@ func _() { _ = x[Name-4] _ = x[IndexID-5] _ = x[TargetStatus-6] - _ = x[Status-7] + _ = x[CurrentStatus-7] _ = x[Element-8] _ = x[Target-9] _ = x[Username-10] @@ -22,9 +22,9 @@ func _() { _ = x[ConstraintOrdinal-12] } -const _Attr_name = "DescIDReferencedDescIDColumnIDNameIndexIDTargetStatusStatusElementTargetUsernameConstraintTypeConstraintOrdinal" +const _Attr_name = "DescIDReferencedDescIDColumnIDNameIndexIDTargetStatusCurrentStatusElementTargetUsernameConstraintTypeConstraintOrdinal" -var _Attr_index = [...]uint8{0, 6, 22, 30, 34, 41, 53, 59, 66, 72, 80, 94, 111} +var _Attr_index = [...]uint8{0, 6, 22, 30, 34, 41, 53, 66, 73, 79, 87, 101, 118} func (i Attr) String() string { i -= 1 diff --git a/pkg/sql/schemachanger/screl/compare.go b/pkg/sql/schemachanger/screl/compare.go index 6a6463453150..1fe12f2a3bde 100644 --- a/pkg/sql/schemachanger/screl/compare.go +++ b/pkg/sql/schemachanger/screl/compare.go @@ -27,7 +27,7 @@ var equalityAttrs = []rel.Attr{ Username, IndexID, TargetStatus, - Status, + CurrentStatus, } // EqualElements returns true if the two elements are equal. diff --git a/pkg/sql/schemachanger/screl/format.go b/pkg/sql/schemachanger/screl/format.go index 8debed891510..700731fd02f4 100644 --- a/pkg/sql/schemachanger/screl/format.go +++ b/pkg/sql/schemachanger/screl/format.go @@ -22,7 +22,7 @@ import ( ) // NodeString formats a node as a string by invoking FormatNode. 
-func NodeString(n *scpb.Node) string { +func NodeString(n *Node) string { var v redact.StringBuilder if err := FormatNode(&v, n); err != nil { return fmt.Sprintf("failed for format node: %v", err) @@ -31,7 +31,7 @@ func NodeString(n *scpb.Node) string { } // FormatNode formats the node into the SafeWriter. -func FormatNode(w redact.SafeWriter, e *scpb.Node) (err error) { +func FormatNode(w redact.SafeWriter, e *Node) (err error) { w.SafeString("[[") if err := FormatElement(w, e.Element()); err != nil { return err @@ -39,7 +39,7 @@ func FormatNode(w redact.SafeWriter, e *scpb.Node) (err error) { w.SafeString(", ") w.SafeString(redact.SafeString(e.Target.TargetStatus.String())) w.SafeString("], ") - w.SafeString(redact.SafeString(e.Status.String())) + w.SafeString(redact.SafeString(e.CurrentStatus.String())) w.SafeString("]") return nil } diff --git a/pkg/sql/rowenc/helpers_test.go b/pkg/sql/schemachanger/screl/node.go similarity index 57% rename from pkg/sql/rowenc/helpers_test.go rename to pkg/sql/schemachanger/screl/node.go index 4cb7ed757ef9..3cd12d0b5331 100644 --- a/pkg/sql/rowenc/helpers_test.go +++ b/pkg/sql/schemachanger/screl/node.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Cockroach Authors. +// Copyright 2022 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,10 +8,12 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package rowenc +package screl -// EncodeArray forwards the definition of encodeArray for testing. -var EncodeArray = encodeArray +import "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" -// DecodeArray forwards the definition of decodeArray for testing. -var DecodeArray = decodeArray +// Node represents a target element with a given current status. +type Node struct { + *scpb.Target + CurrentStatus scpb.Status +} diff --git a/pkg/sql/schemachanger/screl/query_test.go b/pkg/sql/schemachanger/screl/query_test.go index a89043047683..daa8eda5d355 100644 --- a/pkg/sql/schemachanger/screl/query_test.go +++ b/pkg/sql/schemachanger/screl/query_test.go @@ -24,28 +24,31 @@ import ( func TestQueryBasic(t *testing.T) { mkType := func(id descpb.ID) *scpb.Target { - return scpb.NewTarget(scpb.Status_PUBLIC, &scpb.Type{TypeID: id}, nil /* metadata */) + t := scpb.MakeTarget(scpb.Status_PUBLIC, &scpb.Type{TypeID: id}, nil /* metadata */) + return &t } mkTypeRef := func(typID, descID descpb.ID) *scpb.Target { - return scpb.NewTarget(scpb.Status_PUBLIC, &scpb.ViewDependsOnType{ + t := scpb.MakeTarget(scpb.Status_PUBLIC, &scpb.ViewDependsOnType{ TypeID: typID, TableID: descID, }, nil /* metadata */) + return &t } mkTable := func(id descpb.ID) *scpb.Target { - return scpb.NewTarget(scpb.Status_PUBLIC, &scpb.Table{TableID: id}, nil /* metadata */) + t := scpb.MakeTarget(scpb.Status_PUBLIC, &scpb.Table{TableID: id}, nil /* metadata */) + return &t } - concatNodes := func(nodes ...[]*scpb.Node) []*scpb.Node { - var ret []*scpb.Node + concatNodes := func(nodes ...[]*screl.Node) []*screl.Node { + var ret []*screl.Node for _, n := range nodes { ret = append(ret, n...) 
} return ret } - mkNodes := func(status scpb.Status, targets ...*scpb.Target) []*scpb.Node { - var ret []*scpb.Node + mkNodes := func(status scpb.Status, targets ...*scpb.Target) []*screl.Node { + var ret []*screl.Node for _, t := range targets { - ret = append(ret, &scpb.Node{Status: status, Target: t}) + ret = append(ret, &screl.Node{CurrentStatus: status, Target: t}) } return ret } @@ -76,7 +79,7 @@ func TestQueryBasic(t *testing.T) { screl.JoinTargetNode(typeEl, typeTarget, typeNode), dir.Entities(screl.TargetStatus, tableTarget, refTarget, typeTarget), - status.Entities(screl.Status, tableNode, refNode, typeNode), + status.Entities(screl.CurrentStatus, tableNode, refNode, typeNode), ) ) type queryExpectations struct { @@ -85,7 +88,7 @@ func TestQueryBasic(t *testing.T) { exp []string } for _, c := range []struct { - nodes []*scpb.Node + nodes []*screl.Node queries []queryExpectations }{ { @@ -156,7 +159,7 @@ func formatResults(r rel.Result, nodes []rel.Var) string { var buf strings.Builder for _, n := range nodes { buf.WriteString("\n") - buf.WriteString(screl.NodeString(r.Var(n).(*scpb.Node))) + buf.WriteString(screl.NodeString(r.Var(n).(*screl.Node))) } return buf.String() } diff --git a/pkg/sql/schemachanger/screl/scalars.go b/pkg/sql/schemachanger/screl/scalars.go index 7711adaee2b4..b861e0c3d38e 100644 --- a/pkg/sql/schemachanger/screl/scalars.go +++ b/pkg/sql/schemachanger/screl/scalars.go @@ -31,12 +31,12 @@ func GetDescID(e scpb.Element) descpb.ID { } // GetDescIDs returns the descriptor IDs referenced in the state's elements. -func GetDescIDs(s scpb.State) descpb.IDs { +func GetDescIDs(s scpb.TargetState) descpb.IDs { descIDSet := catalog.MakeDescriptorIDSet() - for i := range s.Nodes { + for i := range s.Targets { // Depending on the element type either a single descriptor ID // will exist or multiple (i.e. foreign keys). 
- if id := GetDescID(s.Nodes[i].Element()); id != descpb.InvalidID { + if id := GetDescID(s.Targets[i].Element()); id != descpb.InvalidID { descIDSet.Add(id) } } diff --git a/pkg/sql/schemachanger/scrun/BUILD.bazel b/pkg/sql/schemachanger/scrun/BUILD.bazel index a574954cf914..c3120428054d 100644 --- a/pkg/sql/schemachanger/scrun/BUILD.bazel +++ b/pkg/sql/schemachanger/scrun/BUILD.bazel @@ -12,13 +12,10 @@ go_library( deps = [ "//pkg/jobs/jobspb", "//pkg/settings/cluster", - "//pkg/sql/catalog/descpb", "//pkg/sql/schemachanger/scexec", - "//pkg/sql/schemachanger/scgraphviz", "//pkg/sql/schemachanger/scop", "//pkg/sql/schemachanger/scpb", "//pkg/sql/schemachanger/scplan", - "//pkg/sql/schemachanger/scplan/scstage", "//pkg/util/log/logcrash", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/sql/schemachanger/scrun/scrun.go b/pkg/sql/schemachanger/scrun/scrun.go index 2273aadafdba..d3c177d70987 100644 --- a/pkg/sql/schemachanger/scrun/scrun.go +++ b/pkg/sql/schemachanger/scrun/scrun.go @@ -15,13 +15,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scgraphviz" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan" - "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/scstage" "github.com/cockroachdb/cockroach/pkg/util/log/logcrash" "github.com/cockroachdb/errors" ) @@ -30,8 +27,8 @@ import ( // state. These are the immediate changes which take place at DDL statement // execution time (scop.StatementPhase). func RunStatementPhase( - ctx context.Context, knobs *TestingKnobs, deps scexec.Dependencies, state scpb.State, -) (scpb.State, jobspb.JobID, error) { + ctx context.Context, knobs *TestingKnobs, deps scexec.Dependencies, state scpb.CurrentState, +) (scpb.CurrentState, jobspb.JobID, error) { return runTransactionPhase(ctx, knobs, deps, state, scop.StatementPhase) } @@ -40,8 +37,8 @@ func RunStatementPhase( // than the asynchronous changes which are done by the schema changer job // after the transaction commits. 
func RunPreCommitPhase( - ctx context.Context, knobs *TestingKnobs, deps scexec.Dependencies, state scpb.State, -) (scpb.State, jobspb.JobID, error) { + ctx context.Context, knobs *TestingKnobs, deps scexec.Dependencies, state scpb.CurrentState, +) (scpb.CurrentState, jobspb.JobID, error) { return runTransactionPhase(ctx, knobs, deps, state, scop.PreCommitPhase) } @@ -49,31 +46,31 @@ func runTransactionPhase( ctx context.Context, knobs *TestingKnobs, deps scexec.Dependencies, - state scpb.State, + state scpb.CurrentState, phase scop.Phase, -) (scpb.State, jobspb.JobID, error) { - if len(state.Nodes) == 0 { - return scpb.State{}, jobspb.InvalidJobID, nil +) (scpb.CurrentState, jobspb.JobID, error) { + if len(state.Current) == 0 { + return scpb.CurrentState{}, jobspb.InvalidJobID, nil } sc, err := scplan.MakePlan(state, scplan.Params{ ExecutionPhase: phase, SchemaChangerJobIDSupplier: deps.TransactionalJobRegistry().SchemaChangerJobID, }) if err != nil { - return scpb.State{}, jobspb.InvalidJobID, scgraphviz.DecorateErrorWithPlanDetails(err, sc) + return scpb.CurrentState{}, jobspb.InvalidJobID, err + } + after := state.Current + if len(after) == 0 { + return scpb.CurrentState{}, jobspb.InvalidJobID, nil } - after := state stages := sc.StagesForCurrentPhase() for i := range stages { if err := executeStage(ctx, knobs, deps, sc, i, stages[i]); err != nil { - return scpb.State{}, jobspb.InvalidJobID, err + return scpb.CurrentState{}, jobspb.InvalidJobID, err } after = stages[i].After } - if len(after.Nodes) == 0 { - return scpb.State{}, jobspb.InvalidJobID, nil - } - return after, sc.JobID, nil + return scpb.CurrentState{TargetState: state.TargetState, Current: after}, sc.JobID, nil } // RunSchemaChangesInJob contains the business logic for the Resume method of a @@ -84,24 +81,17 @@ func RunSchemaChangesInJob( settings *cluster.Settings, deps JobRunDependencies, jobID jobspb.JobID, - jobDescriptorIDs []descpb.ID, jobDetails jobspb.NewSchemaChangeDetails, jobProgress jobspb.NewSchemaChangeProgress, rollback bool, ) error { - state := makeState(ctx, - settings, - jobDetails.Targets, - jobProgress.States, - jobProgress.Statements, - jobProgress.Authorization, - rollback) + state := makeState(ctx, settings, jobDetails.TargetState, jobProgress.Current, rollback) sc, err := scplan.MakePlan(state, scplan.Params{ ExecutionPhase: scop.PostCommitPhase, SchemaChangerJobIDSupplier: func() jobspb.JobID { return jobID }, }) if err != nil { - return scgraphviz.DecorateErrorWithPlanDetails(err, sc) + return err } for i := range sc.Stages { @@ -121,17 +111,15 @@ func executeStage( deps scexec.Dependencies, p scplan.Plan, stageIdx int, - stage scstage.Stage, + stage scplan.Stage, ) error { if knobs != nil && knobs.BeforeStage != nil { if err := knobs.BeforeStage(p, stageIdx); err != nil { return err } } - err := scexec.ExecuteStage(ctx, deps, stage.Ops()) - if err != nil { - err = errors.Wrapf(err, "error executing %s", stage.String()) - return scgraphviz.DecorateErrorWithPlanDetails(err, p) + if err := scexec.ExecuteStage(ctx, deps, stage.Ops()); err != nil { + return errors.Wrapf(p.DecorateErrorWithPlanDetails(err), "error executing %s", stage.String()) } return nil } @@ -139,34 +127,25 @@ func executeStage( func makeState( ctx context.Context, sv *cluster.Settings, - protos []*scpb.Target, - states []scpb.Status, - statements []*scpb.Statement, - authorization *scpb.Authorization, + targetState scpb.TargetState, + incumbent []scpb.Status, rollback bool, -) scpb.State { - if len(protos) != len(states) { +) 
scpb.CurrentState { + if len(targetState.Targets) != len(incumbent) { logcrash.ReportOrPanic(ctx, &sv.SV, "unexpected slice size mismatch %d and %d", - len(protos), len(states)) + len(targetState.Targets), len(incumbent)) } - ts := scpb.State{ - Statements: statements, - Authorization: *authorization, - } - ts.Nodes = make([]*scpb.Node, len(protos)) - for i := range protos { - ts.Nodes[i] = &scpb.Node{ - Target: protos[i], - Status: states[i], - } - if rollback { - switch ts.Nodes[i].TargetStatus { + s := scpb.CurrentState{TargetState: targetState, Current: incumbent} + if rollback { + for i := range s.Targets { + t := &s.Targets[i] + switch t.TargetStatus { case scpb.Status_PUBLIC: - ts.Nodes[i].TargetStatus = scpb.Status_ABSENT + t.TargetStatus = scpb.Status_ABSENT case scpb.Status_ABSENT: - ts.Nodes[i].TargetStatus = scpb.Status_PUBLIC + t.TargetStatus = scpb.Status_PUBLIC } } } - return ts + return s } diff --git a/pkg/sql/schemachanger/testdata/alter_table_add_column b/pkg/sql/schemachanger/testdata/alter_table_add_column index 3d06fe092285..5771a8ec5cb8 100644 --- a/pkg/sql/schemachanger/testdata/alter_table_add_column +++ b/pkg/sql/schemachanger/testdata/alter_table_add_column @@ -17,7 +17,7 @@ begin transaction #1 ## PreCommitPhase stage 1 of 1 with 5 MutationType ops create job #1: "schema change job" descriptor IDs: [56] -write *eventpb.AlterTable to event log for descriptor #56: ALTER TABLE db.public.tbl ADD COLUMN j INT8 NOT NULL DEFAULT 42 +write *eventpb.AlterTable to event log for descriptor #56: ALTER TABLE ‹db›.‹public›.‹tbl› ADD COLUMN ‹j› INT8 NOT NULL DEFAULT ‹42› upsert descriptor #56 ... - columnIds: diff --git a/pkg/sql/schemachanger/testdata/drop b/pkg/sql/schemachanger/testdata/drop index 22b3333cba62..ede150c2e3ff 100644 --- a/pkg/sql/schemachanger/testdata/drop +++ b/pkg/sql/schemachanger/testdata/drop @@ -12,7 +12,8 @@ DROP SCHEMA db.sc; ---- begin transaction #1 # begin StatementPhase -## StatementPhase stage 1 of 1 with 1 MutationType ops +## StatementPhase stage 1 of 1 with 2 MutationType ops +delete comment for descriptor #56 of type SchemaCommentType # end StatementPhase # begin PreCommitPhase ## PreCommitPhase stage 1 of 1 with 5 MutationType ops @@ -38,7 +39,7 @@ commit transaction #1 # begin PostCommitPhase begin transaction #2 ## PostCommitNonRevertiblePhase stage 1 of 1 with 6 MutationType ops -write *eventpb.DropSchema to event log for descriptor #56: DROP SCHEMA db.sc +write *eventpb.DropSchema to event log for descriptor #56: DROP SCHEMA ‹db›.‹sc› update progress of schema change job #1 set schema change job #1 to non-cancellable delete schema namespace entry {54 0 sc} -> 56 @@ -62,7 +63,12 @@ DROP TABLE db.sc.t; ---- begin transaction #1 # begin StatementPhase -## StatementPhase stage 1 of 1 with 1 MutationType ops +## StatementPhase stage 1 of 1 with 6 MutationType ops +delete comment for descriptor #58 of type TableCommentType +delete comment for descriptor #58 of type ColumnCommentType +delete comment for descriptor #58 of type ColumnCommentType +delete comment for descriptor #58 of type IndexCommentType +delete comment for constraint on #58, name: t_pkey, type: PrimaryKey # end StatementPhase # begin PreCommitPhase ## PreCommitPhase stage 1 of 1 with 4 MutationType ops @@ -90,7 +96,7 @@ begin transaction #2 ## PostCommitNonRevertiblePhase stage 1 of 1 with 4 MutationType ops create job #2: "GC for dropping descriptor 58" descriptor IDs: [58] -write *eventpb.DropTable to event log for descriptor #58: DROP TABLE db.sc.t +write *eventpb.DropTable 
to event log for descriptor #58: DROP TABLE ‹db›.‹sc›.‹t› update progress of schema change job #1 set schema change job #1 to non-cancellable upsert descriptor #58 @@ -113,7 +119,8 @@ DROP SCHEMA db.sc CASCADE; ---- begin transaction #1 # begin StatementPhase -## StatementPhase stage 1 of 1 with 3 MutationType ops +## StatementPhase stage 1 of 1 with 4 MutationType ops +delete comment for descriptor #57 of type SchemaCommentType # end StatementPhase # begin PreCommitPhase ## PreCommitPhase stage 1 of 1 with 11 MutationType ops @@ -155,7 +162,7 @@ commit transaction #1 # begin PostCommitPhase begin transaction #2 ## PostCommitNonRevertiblePhase stage 1 of 1 with 12 MutationType ops -write *eventpb.DropSchema to event log for descriptor #57: DROP SCHEMA db.sc CASCADE +write *eventpb.DropSchema to event log for descriptor #57: DROP SCHEMA ‹db›.‹sc› CASCADE update progress of schema change job #1 set schema change job #1 to non-cancellable delete schema namespace entry {54 0 sc} -> 57 @@ -170,7 +177,9 @@ DROP DATABASE db CASCADE; ---- begin transaction #1 # begin StatementPhase -## StatementPhase stage 1 of 1 with 2 MutationType ops +## StatementPhase stage 1 of 1 with 4 MutationType ops +delete comment for descriptor #55 of type SchemaCommentType +delete comment for descriptor #54 of type DatabaseCommentType # end StatementPhase # begin PreCommitPhase ## PreCommitPhase stage 1 of 1 with 6 MutationType ops @@ -201,7 +210,7 @@ begin transaction #2 ## PostCommitNonRevertiblePhase stage 1 of 1 with 9 MutationType ops create job #2: "GC for dropping descriptors and parent database 54" descriptor IDs: [] -write *eventpb.DropDatabase to event log for descriptor #54: DROP DATABASE db CASCADE +write *eventpb.DropDatabase to event log for descriptor #54: DROP DATABASE ‹db› CASCADE update progress of schema change job #1 set schema change job #1 to non-cancellable delete database namespace entry {0 0 db} -> 54 @@ -251,7 +260,29 @@ DROP DATABASE db1 CASCADE ---- begin transaction #1 # begin StatementPhase -## StatementPhase stage 1 of 1 with 14 MutationType ops +## StatementPhase stage 1 of 1 with 36 MutationType ops +delete comment for descriptor #64 of type TableCommentType +delete comment for descriptor #67 of type TableCommentType +delete comment for descriptor #67 of type ColumnCommentType +delete comment for descriptor #67 of type ColumnCommentType +delete comment for descriptor #67 of type ColumnCommentType +delete comment for descriptor #67 of type IndexCommentType +delete comment for descriptor #62 of type SchemaCommentType +delete comment for descriptor #65 of type TableCommentType +delete comment for descriptor #66 of type TableCommentType +delete comment for descriptor #66 of type ColumnCommentType +delete comment for descriptor #66 of type ColumnCommentType +delete comment for descriptor #66 of type ColumnCommentType +delete comment for descriptor #66 of type IndexCommentType +delete comment for descriptor #68 of type TableCommentType +delete comment for descriptor #69 of type TableCommentType +delete comment for descriptor #70 of type TableCommentType +delete comment for descriptor #71 of type TableCommentType +delete comment for descriptor #74 of type TableCommentType +delete comment for descriptor #63 of type SchemaCommentType +delete comment for descriptor #61 of type DatabaseCommentType +delete comment for constraint on #67, name: t1_pkey, type: PrimaryKey +delete comment for constraint on #66, name: t1_pkey, type: PrimaryKey # end StatementPhase # begin PreCommitPhase ## PreCommitPhase 
stage 1 of 1 with 56 MutationType ops @@ -566,7 +597,7 @@ begin transaction #2 ## PostCommitNonRevertiblePhase stage 1 of 1 with 46 MutationType ops create job #2: "GC for dropping descriptors 64 67 65 66 68 69 70 71 74 and parent database 61" descriptor IDs: [64 67 65 66 68 69 70 71 74] -write *eventpb.DropDatabase to event log for descriptor #61: DROP DATABASE db1 CASCADE +write *eventpb.DropDatabase to event log for descriptor #61: DROP DATABASE ‹db1› CASCADE update progress of schema change job #1 set schema change job #1 to non-cancellable delete database namespace entry {0 0 db1} -> 61 diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index d65c3a0a4b0b..42c818af1c64 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -224,6 +223,7 @@ func (n *scrubNode) startScrubTable( return err } n.run.checkQueue = append(n.run.checkQueue, checks...) + case *tree.ScrubOptionPhysical: if physicalCheckSet { return pgerror.Newf(pgcode.Syntax, @@ -234,8 +234,8 @@ func (n *scrubNode) startScrubTable( "cannot use AS OF SYSTEM TIME with PHYSICAL option") } physicalCheckSet = true - physicalChecks := createPhysicalCheckOperations(tableDesc, tableName) - n.run.checkQueue = append(n.run.checkQueue, physicalChecks...) + return pgerror.Newf(pgcode.FeatureNotSupported, "PHYSICAL scrub not implemented") + case *tree.ScrubOptionConstraint: if constraintsSet { return pgerror.Newf(pgcode.Syntax, @@ -248,6 +248,7 @@ func (n *scrubNode) startScrubTable( return err } n.run.checkQueue = append(n.run.checkQueue, constraintsToCheck...) + default: panic(errors.AssertionFailedf("unhandled SCRUB option received: %+v", v)) } @@ -269,8 +270,7 @@ func (n *scrubNode) startScrubTable( } n.run.checkQueue = append(n.run.checkQueue, constraintsToCheck...) - physicalChecks := createPhysicalCheckOperations(tableDesc, tableName) - n.run.checkQueue = append(n.run.checkQueue, physicalChecks...) + // Physical checks are no longer implemented. } return nil } @@ -335,17 +335,6 @@ func pairwiseOp(left []string, right []string, op string) []string { return res } -// createPhysicalCheckOperations will return the physicalCheckOperation -// for all indexes on a table. -func createPhysicalCheckOperations( - tableDesc catalog.TableDescriptor, tableName *tree.TableName, -) (checks []checkOperation) { - for _, idx := range tableDesc.ActiveIndexes() { - checks = append(checks, newPhysicalCheckOperation(tableName, tableDesc, idx)) - } - return checks -} - // createIndexCheckOperations will return the checkOperations for the // provided indexes. If indexNames is nil, then all indexes are // returned. @@ -468,40 +457,3 @@ func createConstraintCheckOperations( } return results, nil } - -// scrubRunDistSQL run a distSQLPhysicalPlan plan in distSQL. If non-nil -// rowContainerHelper is returned, the caller must close it. 
-func scrubRunDistSQL( - ctx context.Context, planCtx *PlanningCtx, p *planner, plan *PhysicalPlan, columnTypes []*types.T, -) (*rowContainerHelper, error) { - var rowContainer rowContainerHelper - rowContainer.Init(columnTypes, &p.extendedEvalCtx, "scrub" /* opName */) - rowResultWriter := NewRowResultWriter(&rowContainer) - recv := MakeDistSQLReceiver( - ctx, - rowResultWriter, - tree.Rows, - p.ExecCfg().RangeDescriptorCache, - p.txn, - p.ExecCfg().Clock, - p.extendedEvalCtx.Tracing, - p.ExecCfg().ContentionRegistry, - nil, /* testingPushCallback */ - ) - defer recv.Release() - - // Copy the evalCtx, as dsp.Run() might change it. - evalCtxCopy := p.extendedEvalCtx - p.extendedEvalCtx.DistSQLPlanner.Run( - planCtx, p.txn, plan, recv, &evalCtxCopy, nil, /* finishedSetupFn */ - )() - if rowResultWriter.Err() != nil { - rowContainer.Close(ctx) - return nil, rowResultWriter.Err() - } else if rowContainer.Len() == 0 { - rowContainer.Close(ctx) - return nil, nil - } - - return &rowContainer, nil -} diff --git a/pkg/sql/scrub_physical.go b/pkg/sql/scrub_physical.go deleted file mode 100644 index dd7dc8e9cc77..000000000000 --- a/pkg/sql/scrub_physical.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 The Cockroach Authors. -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package sql - -import ( - "context" - "time" - - "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowexec" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/span" - "github.com/cockroachdb/errors" -) - -var _ checkOperation = &physicalCheckOperation{} - -// physicalCheckOperation is a check on an indexes physical data. -type physicalCheckOperation struct { - tableName *tree.TableName - tableDesc catalog.TableDescriptor - index catalog.Index - - // columns is a list of the columns returned in the query result - // tree.Datums. - columns []catalog.Column - // primaryColIdxs maps PrimaryIndex.Columns to the row - // indexes in the query result tree.Datums. - primaryColIdxs []int - - run physicalCheckRun -} - -// physicalCheckRun contains the run-time state for -// physicalCheckOperation during local execution. -type physicalCheckRun struct { - started bool - - rows *rowContainerHelper - iterator *rowContainerIterator - // If currentRow is nil, it means that all rows have been exhausted. - currentRow tree.Datums -} - -func newPhysicalCheckOperation( - tableName *tree.TableName, tableDesc catalog.TableDescriptor, index catalog.Index, -) *physicalCheckOperation { - return &physicalCheckOperation{ - tableName: tableName, - tableDesc: tableDesc, - index: index, - } -} - -// Start implements the checkOperation interface. -// It will plan and run the physical data check using the distSQL -// execution engine. -func (o *physicalCheckOperation) Start(params runParams) error { - ctx := params.ctx - // Collect all of the columns, their types, and their IDs. - var columnIDs []tree.ColumnID - colIDToIdx := catalog.ColumnIDToOrdinalMap(o.tableDesc.PublicColumns()) - columns := make([]catalog.Column, len(columnIDs)) - - // Collect all of the columns being scanned. 
- if o.index.GetID() == o.tableDesc.GetPrimaryIndexID() { - for _, c := range o.tableDesc.PublicColumns() { - columnIDs = append(columnIDs, tree.ColumnID(c.GetID())) - } - } else { - for i := 0; i < o.index.NumKeyColumns(); i++ { - id := o.index.GetKeyColumnID(i) - columnIDs = append(columnIDs, tree.ColumnID(id)) - } - for i := 0; i < o.index.NumKeySuffixColumns(); i++ { - id := o.index.GetKeySuffixColumnID(i) - columnIDs = append(columnIDs, tree.ColumnID(id)) - } - for i := 0; i < o.index.NumSecondaryStoredColumns(); i++ { - id := o.index.GetStoredColumnID(i) - columnIDs = append(columnIDs, tree.ColumnID(id)) - } - } - - for i := range columnIDs { - idx := colIDToIdx.GetDefault(descpb.ColumnID(columnIDs[i])) - columns = append(columns, o.tableDesc.PublicColumns()[idx]) - } - - // Find the row indexes for all of the primary index columns. - primaryColIdxs, err := getPrimaryColIdxs(o.tableDesc, columns) - if err != nil { - return err - } - - indexFlags := &tree.IndexFlags{ - IndexID: tree.IndexID(o.index.GetID()), - NoIndexJoin: true, - } - scan := params.p.Scan() - scan.isCheck = true - colCfg := scanColumnsConfig{wantedColumns: columnIDs, addUnwantedAsHidden: true} - if err := scan.initTable(ctx, params.p, o.tableDesc, indexFlags, colCfg); err != nil { - return err - } - scan.index = scan.specifiedIndex - sb := span.MakeBuilder(params.EvalContext(), params.ExecCfg().Codec, o.tableDesc, o.index) - defer sb.Release() - scan.spans, err = sb.UnconstrainedSpans() - if err != nil { - return err - } - scan.isFull = true - - planCtx := params.extendedEvalCtx.DistSQLPlanner.NewPlanningCtx(ctx, params.extendedEvalCtx, params.p, params.p.txn, true /* distribute */) - // Since physicalCheckOperation might be only one of many check operations - // that scrubNode needs to perform, we need to make sure that scrubNode - // is not closed when this physical check operation is being cleaned up. - planCtx.ignoreClose = true - physPlan, err := params.extendedEvalCtx.DistSQLPlanner.createScrubPhysicalCheck(planCtx, scan) - if err != nil { - return err - } - - o.primaryColIdxs = primaryColIdxs - o.columns = columns - o.run.started = true - rows, err := scrubRunDistSQL(ctx, planCtx, params.p, physPlan, rowexec.ScrubTypes) - if rows == nil || err != nil { - // If either there were no rows that failed the check operation or an - // error was encountered, we short-circuit and don't set currentRow. - // This will indicate that we're done. - return err - } - o.run.rows = rows - o.run.iterator = newRowContainerIterator(ctx, *rows, rowexec.ScrubTypes) - o.run.currentRow, err = o.run.iterator.Next() - return err -} - -// Next implements the checkOperation interface. -func (o *physicalCheckOperation) Next(params runParams) (tree.Datums, error) { - timestamp, err := tree.MakeDTimestamp( - params.extendedEvalCtx.GetStmtTimestamp(), time.Nanosecond) - if err != nil { - return nil, err - } - - details, ok := o.run.currentRow[2].(*tree.DJSON) - if !ok { - return nil, errors.Errorf("expected row value 3 to be DJSON, got: %T", o.run.currentRow[2]) - } - - res := tree.Datums{ - // TODO(joey): Add the job UUID once the SCRUB command uses jobs. - tree.DNull, /* job_uuid */ - o.run.currentRow[0], /* errorType */ - tree.NewDString(o.tableName.Catalog()), - tree.NewDString(o.tableName.Table()), - o.run.currentRow[1], /* primaryKey */ - timestamp, - tree.DBoolFalse, - details, - } - - // Advance to the next row. - o.run.currentRow, err = o.run.iterator.Next() - return res, err -} - -// Started implements the checkOperation interface. 
-func (o *physicalCheckOperation) Started() bool { - return o.run.started -} - -// Done implements the checkOperation interface. -func (o *physicalCheckOperation) Done(context.Context) bool { - return o.run.currentRow == nil -} - -// Close implements the checkOperation interface. -func (o *physicalCheckOperation) Close(ctx context.Context) { - if o.run.rows != nil { - o.run.rows.Close(ctx) - o.run.rows = nil - } - if o.run.iterator != nil { - o.run.iterator.Close() - o.run.iterator = nil - } -} diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index 559268b6b4dc..89ece54c6cf5 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -25,10 +25,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" - "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -351,7 +351,7 @@ INSERT INTO t.test VALUES (10, 2); // Create the primary index key. values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(2)} primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( - keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndexID()) + keys.SystemSQLCodec, tableDesc.GetID(), tableDesc.GetPrimaryIndexID()) primaryIndexKey, _, err := rowenc.EncodeIndexKey( tableDesc, tableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { @@ -366,8 +366,8 @@ INSERT INTO t.test VALUES (10, 2); // constraint. values = []tree.Datum{tree.NewDInt(10), tree.NewDInt(0)} // Encode the column value. - valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.PublicColumns()[1].GetID(), values[1], []byte(nil)) + valueBuf, err := valueside.Encode( + []byte(nil), valueside.MakeColumnIDDelta(0, tableDesc.PublicColumns()[1].GetID()), values[1], []byte(nil)) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -559,396 +559,6 @@ ALTER TABLE t.child ADD FOREIGN KEY (parent_id, parent_id2) REFERENCES t.parent runScrub(t, db, `EXPERIMENTAL SCRUB TABLE t.child AS OF SYSTEM TIME '-1ms' WITH OPTIONS CONSTRAINT ALL`, exp) } -// TestScrubPhysicalNonnullableNullInSingleColumnFamily tests that -// `SCRUB TABLE ... WITH OPTIONS PHYSICAL` will find any rows where a -// value is NULL for a column that is not-nullable and the only column -// in a family. To test this, a row is created that we later overwrite -// the value for. The value that is inserted is the sentinel value as -// the column is the only one in the family. -func TestScrubPhysicalNonnullableNullInSingleColumnFamily(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - - // Create the table and the row entry. - if _, err := db.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k INT PRIMARY KEY, v INT NOT NULL); -INSERT INTO t.test VALUES (217, 314); -`); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - - // Construct datums for our row values (k, v). 
- values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} - - var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.PublicColumns()[0].GetID(), 0) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[1].GetID(), 1) - - // Create the primary index key - primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( - keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndexID()) - primaryIndexKey, _, err := rowenc.EncodeIndexKey( - tableDesc, tableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, primaryIndexKeyPrefix) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Add the family suffix to the key. - family := tableDesc.GetFamilies()[0] - primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID)) - - // Create an empty sentinel value. - var value roachpb.Value - value.SetTuple([]byte(nil)) - - if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Run SCRUB and find the errors we created. - rows, err := db.Query(`EXPERIMENTAL SCRUB TABLE t.test WITH OPTIONS PHYSICAL`) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer rows.Close() - results, err := sqlutils.GetScrubResultRows(rows) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if len(results) != 1 { - t.Fatalf("expected 1 result, got %d. got %#v", len(results), results) - } - - if result := results[0]; result.ErrorType != string(scrub.UnexpectedNullValueError) { - t.Fatalf("expected %q error, instead got: %s", - scrub.UnexpectedNullValueError, result.ErrorType) - } else if result.Database != "t" { - t.Fatalf("expected database %q, got %q", "t", result.Database) - } else if result.Table != "test" { - t.Fatalf("expected table %q, got %q", "test", result.Table) - } else if result.PrimaryKey != "(217)" { - t.Fatalf("expected primaryKey %q, got %q", "(217)", result.PrimaryKey) - } else if result.Repaired { - t.Fatalf("expected repaired %v, got %v", false, result.Repaired) - } else if !strings.Contains(result.Details, `"k": "217"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"k": "217"`, result.Details) - } else if !strings.Contains(result.Details, `"v": ""`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"v": ""`, result.Details) - } -} - -// TestScrubPhysicalNonnullableNullInMulticolumnFamily tests that -// `SCRUB TABLE ... WITH OPTIONS PHYSICAL` will find any rows where a -// value is NULL for a column that is not-nullable and is not the only -// column in a family. To test this, a row is created that we later -// overwrite the value for. The value that is inserted is missing one of -// the columns that belongs in the family. -func TestScrubPhysicalNonnullableNullInMulticolumnFamily(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - - // Create the table and the row entry. - if _, err := db.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k INT PRIMARY KEY, v INT NOT NULL, b INT NOT NULL, FAMILY (k), FAMILY (v, b)); -INSERT INTO t.test VALUES (217, 314, 1337); -`); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - - // Construct datums for our row values (k, v, b). 
- values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} - - var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.PublicColumns()[0].GetID(), 0) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[1].GetID(), 1) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[2].GetID(), 2) - - // Create the primary index key - primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( - keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndexID()) - primaryIndexKey, _, err := rowenc.EncodeIndexKey( - tableDesc, tableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, primaryIndexKeyPrefix) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Add the family suffix to the key, in particular we care about the - // second column family. - family := tableDesc.GetFamilies()[1] - primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(family.ID)) - - // Encode the second column value. - valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.PublicColumns()[1].GetID(), values[1], []byte(nil)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Construct the tuple for the family that is missing a column value, i.e. it is NULL. - var value roachpb.Value - value.SetTuple(valueBuf) - - // Overwrite the existing value. - if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Run SCRUB and find the errors we created. - rows, err := db.Query(`EXPERIMENTAL SCRUB TABLE t.test WITH OPTIONS PHYSICAL`) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer rows.Close() - results, err := sqlutils.GetScrubResultRows(rows) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if len(results) != 1 { - t.Fatalf("expected 1 result, got %d. got %#v", len(results), results) - } - - if result := results[0]; result.ErrorType != string(scrub.UnexpectedNullValueError) { - t.Fatalf("expected %q error, instead got: %s", - scrub.UnexpectedNullValueError, result.ErrorType) - } else if result.Database != "t" { - t.Fatalf("expected database %q, got %q", "t", result.Database) - } else if result.Table != "test" { - t.Fatalf("expected table %q, got %q", "test", result.Table) - } else if result.PrimaryKey != "(217)" { - t.Fatalf("expected primaryKey %q, got %q", "(217)", result.PrimaryKey) - } else if result.Repaired { - t.Fatalf("expected repaired %v, got %v", false, result.Repaired) - } else if !strings.Contains(result.Details, `"k": "217"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"k": "217"`, result.Details) - } else if !strings.Contains(result.Details, `"v": "314"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"v": "314"`, result.Details) - } else if !strings.Contains(result.Details, `"b": ""`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"b": ""`, result.Details) - } -} - -// TestScrubPhysicalUnexpectedFamilyID tests that `SCRUB TABLE ... WITH -// OPTIONS PHYSICAL` will find any rows where a primary index as key -// with an invalid family ID. To test this, a table is made with 2 -// families and then the first family is dropped. A row is then inserted -// using the KV client which has the ID of the first family. 
-func TestScrubPhysicalUnexpectedFamilyID(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - skip.WithIssue(t, 51797, "currently KV pairs with unexpected family IDs are not noticed by the fetcher") - s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - - // Create the table and the row entry. - if _, err := db.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test ( - k INT PRIMARY KEY, - v1 INT NOT NULL, - v2 INT NOT NULL, - FAMILY first (v1), - FAMILY second (v2) -); -`); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - oldTableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - - // Drop the first column family. - if _, err := db.Exec(`ALTER TABLE t.test DROP COLUMN v1`); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - - // Construct datums for our row values (k, v1). - values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} - - var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.PublicColumns()[0].GetID(), 0) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[1].GetID(), 1) - - // Create the primary index key - primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( - keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndexID()) - primaryIndexKey, _, err := rowenc.EncodeIndexKey( - tableDesc, tableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, primaryIndexKeyPrefix) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Add the correct family suffix to the key. - primaryIndexKeyWithFamily := keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.GetFamilies()[1].ID)) - - // Encode the second column value. - valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.PublicColumns()[1].GetID(), values[1], []byte(nil)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - var value roachpb.Value - value.SetTuple(valueBuf) - - // Insert the value. - if err := kvDB.Put(context.Background(), primaryIndexKeyWithFamily, &value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Create a k/v with an incorrect family suffix to the key. - primaryIndexKeyWithFamily = keys.MakeFamilyKey(primaryIndexKey, - uint32(oldTableDesc.GetFamilies()[1].ID)) - - // Encode the second column value. - valueBuf, err = rowenc.EncodeTableValue( - []byte(nil), tableDesc.PublicColumns()[1].GetID(), values[1], []byte(nil)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - value = roachpb.Value{} - value.SetTuple(valueBuf) - - // Insert the incorrect family k/v. - if err := kvDB.Put(context.Background(), primaryIndexKeyWithFamily, &value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Run SCRUB and find the errors we created. - rows, err := db.Query(`EXPERIMENTAL SCRUB TABLE t.test WITH OPTIONS PHYSICAL`) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer rows.Close() - results, err := sqlutils.GetScrubResultRows(rows) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if len(results) != 1 { - t.Fatalf("expected 1 result, got %d. 
got %#v", len(results), results) - } - - if result := results[0]; result.ErrorType != string(scrub.UnexpectedNullValueError) { - t.Fatalf("expected %q error, instead got: %s", - scrub.UnexpectedNullValueError, result.ErrorType) - } else if result.Database != "t" { - t.Fatalf("expected database %q, got %q", "t", result.Database) - } else if result.Table != "test" { - t.Fatalf("expected table %q, got %q", "test", result.Table) - } else if result.PrimaryKey != "(217)" { - t.Fatalf("expected primaryKey %q, got %q", "(217)", result.PrimaryKey) - } else if result.Repaired { - t.Fatalf("expected repaired %v, got %v", false, result.Repaired) - } else if !strings.Contains(result.Details, `"k": "217"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"k": "217"`, result.Details) - } else if !strings.Contains(result.Details, `"v": "314"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"v": "314"`, result.Details) - } else if !strings.Contains(result.Details, `"b": ""`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"b": ""`, result.Details) - } -} - -// TestScrubPhysicalIncorrectPrimaryIndexValueColumn tests that -// `SCRUB TABLE ... WITH OPTIONS PHYSICAL` will find any rows where a -// value has an encoded column ID that does not correspond to the table -// descriptor. To test this, a row is inserted using the KV client. -func TestScrubPhysicalIncorrectPrimaryIndexValueColumn(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - skip.WithIssue(t, 51797, "the test is not failing, as it would be expected") - s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.Background()) - - // Create the table and the row entry. - if _, err := db.Exec(` -CREATE DATABASE t; -CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); -`); err != nil { - t.Fatalf("unexpected error: %s", err) - } - tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") - - // Construct datums for our row values (k, v1, v2). - values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} - - var colIDtoRowIndex catalog.TableColMap - colIDtoRowIndex.Set(tableDesc.PublicColumns()[0].GetID(), 0) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[1].GetID(), 1) - colIDtoRowIndex.Set(tableDesc.PublicColumns()[2].GetID(), 2) - - // Create the primary index key - primaryIndexKeyPrefix := rowenc.MakeIndexKeyPrefix( - keys.SystemSQLCodec, tableDesc, tableDesc.GetPrimaryIndexID()) - primaryIndexKey, _, err := rowenc.EncodeIndexKey( - tableDesc, tableDesc.GetPrimaryIndex(), colIDtoRowIndex, values, primaryIndexKeyPrefix) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - // Add the default family suffix to the key. - primaryIndexKey = keys.MakeFamilyKey(primaryIndexKey, uint32(tableDesc.GetFamilies()[0].ID)) - - // Encode the second column values. The second column is encoded with - // a garbage colIDDiff. - valueBuf, err := rowenc.EncodeTableValue( - []byte(nil), tableDesc.PublicColumns()[1].GetID(), values[1], []byte(nil)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - valueBuf, err = rowenc.EncodeTableValue(valueBuf, 1000, values[2], []byte(nil)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Construct the tuple for the family that is missing a column value, i.e. it is NULL. - var value roachpb.Value - value.SetTuple(valueBuf) - - // Overwrite the existing value. 
- if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Run SCRUB and find the errors we created. - rows, err := db.Query(`EXPERIMENTAL SCRUB TABLE t.test WITH OPTIONS PHYSICAL`) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - defer rows.Close() - - results, err := sqlutils.GetScrubResultRows(rows) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if len(results) != 1 { - t.Fatalf("expected 1 result, got %d. got %#v", len(results), results) - } - - if result := results[0]; result.ErrorType != string(scrub.UnexpectedNullValueError) { - t.Fatalf("expected %q error, instead got: %s", - scrub.UnexpectedNullValueError, result.ErrorType) - } else if result.Database != "t" { - t.Fatalf("expected database %q, got %q", "t", result.Database) - } else if result.Table != "test" { - t.Fatalf("expected table %q, got %q", "test", result.Table) - } else if result.PrimaryKey != "(217)" { - t.Fatalf("expected primaryKey %q, got %q", "(217)", result.PrimaryKey) - } else if result.Repaired { - t.Fatalf("expected repaired %v, got %v", false, result.Repaired) - } else if !strings.Contains(result.Details, `"k": "217"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"k": "217"`, result.Details) - } else if !strings.Contains(result.Details, `"v": "314"`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"v": "314"`, result.Details) - } else if !strings.Contains(result.Details, `"b": ""`) { - t.Fatalf("expected error details to contain `%s`, got %s", `"b": ""`, result.Details) - } -} - type expectedScrubResult struct { ErrorType string Database string diff --git a/pkg/sql/sem/builtins/BUILD.bazel b/pkg/sql/sem/builtins/BUILD.bazel index 9bb43e48cf40..c0478db1087e 100644 --- a/pkg/sql/sem/builtins/BUILD.bazel +++ b/pkg/sql/sem/builtins/BUILD.bazel @@ -59,6 +59,8 @@ go_library( "//pkg/sql/protoreflect", "//pkg/sql/roleoption", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", @@ -93,7 +95,7 @@ go_library( "//pkg/util/ulid", "//pkg/util/unaccent", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_golang_geo//s1", "@com_github_knz_strtime//:strtime", diff --git a/pkg/sql/sem/builtins/aggregate_builtins.go b/pkg/sql/sem/builtins/aggregate_builtins.go index e60213e40cb5..ca347f68528e 100644 --- a/pkg/sql/sem/builtins/aggregate_builtins.go +++ b/pkg/sql/sem/builtins/aggregate_builtins.go @@ -18,7 +18,7 @@ import ( "strconv" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/geo/geos" @@ -2004,21 +2004,21 @@ func (a *regressionAccumulatorDecimalBase) add( a.syy.Set(&a.tmpSyy) a.sxy.Set(&a.tmpSxy) - size := int64(tree.SizeOfDecimal(&a.n) + - tree.SizeOfDecimal(&a.sx) + - tree.SizeOfDecimal(&a.sxx) + - tree.SizeOfDecimal(&a.sy) + - tree.SizeOfDecimal(&a.syy) + - tree.SizeOfDecimal(&a.sxy) + - tree.SizeOfDecimal(&a.tmpX) + - tree.SizeOfDecimal(&a.tmpY) + - tree.SizeOfDecimal(&a.scale) + - tree.SizeOfDecimal(&a.tmpN) + - tree.SizeOfDecimal(&a.tmpSx) + - tree.SizeOfDecimal(&a.tmpSxx) + - tree.SizeOfDecimal(&a.tmpSy) + - tree.SizeOfDecimal(&a.tmpSyy) + - tree.SizeOfDecimal(&a.tmpSxy)) + size := int64(a.n.Size() + + a.sx.Size() 
+ + a.sxx.Size() + + a.sy.Size() + + a.syy.Size() + + a.sxy.Size() + + a.tmpX.Size() + + a.tmpY.Size() + + a.scale.Size() + + a.tmpN.Size() + + a.tmpSx.Size() + + a.tmpSxx.Size() + + a.tmpSy.Size() + + a.tmpSyy.Size() + + a.tmpSxy.Size()) if err := a.updateMemoryUsage(ctx, size); err != nil { return err } @@ -2687,7 +2687,7 @@ func (a *intSumAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tree.D if err != nil { return err } - if err := a.updateMemoryUsage(ctx, int64(tree.SizeOfDecimal(&a.decSum))); err != nil { + if err := a.updateMemoryUsage(ctx, int64(a.decSum.Size())); err != nil { return err } } @@ -2755,7 +2755,7 @@ func (a *decimalSumAggregate) Add(ctx context.Context, datum tree.Datum, _ ...tr return err } - if err := a.updateMemoryUsage(ctx, int64(tree.SizeOfDecimal(&a.sum))); err != nil { + if err := a.updateMemoryUsage(ctx, int64(a.sum.Size())); err != nil { return err } @@ -3058,11 +3058,11 @@ func (a *decimalSqrDiffAggregate) Add( a.ed.Sub(&a.tmp, d, &a.mean) a.ed.Add(&a.sqrDiff, &a.sqrDiff, a.ed.Mul(&a.delta, &a.delta, &a.tmp)) - size := int64(tree.SizeOfDecimal(&a.count) + - tree.SizeOfDecimal(&a.mean) + - tree.SizeOfDecimal(&a.sqrDiff) + - tree.SizeOfDecimal(&a.delta) + - tree.SizeOfDecimal(&a.tmp)) + size := int64(a.count.Size() + + a.mean.Size() + + a.sqrDiff.Size() + + a.delta.Size() + + a.tmp.Size()) if err := a.updateMemoryUsage(ctx, size); err != nil { return err } @@ -3257,13 +3257,13 @@ func (a *decimalSumSqrDiffsAggregate) Add( // Update running mean. a.ed.Add(&a.mean, &a.mean, &a.tmp) - size := int64(tree.SizeOfDecimal(&a.count) + - tree.SizeOfDecimal(&a.mean) + - tree.SizeOfDecimal(&a.sqrDiff) + - tree.SizeOfDecimal(&a.tmpCount) + - tree.SizeOfDecimal(&a.tmpMean) + - tree.SizeOfDecimal(&a.delta) + - tree.SizeOfDecimal(&a.tmp)) + size := int64(a.count.Size() + + a.mean.Size() + + a.sqrDiff.Size() + + a.tmpCount.Size() + + a.tmpMean.Size() + + a.delta.Size() + + a.tmp.Size()) if err := a.updateMemoryUsage(ctx, size); err != nil { return err } diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index 8b3ff8cd6707..b5fbad1e0c5c 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -60,6 +60,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" @@ -341,6 +342,45 @@ var builtins = map[string]builtinDefinition{ ), ), + "prettify_statement": makeBuiltin(tree.FunctionProperties{Category: categoryString}, + stringOverload1( + func(evalCtx *tree.EvalContext, s string) (tree.Datum, error) { + formattedStmt, err := prettyStatement(tree.DefaultPrettyCfg(), s) + if err != nil { + return nil, err + } + return tree.NewDString(formattedStmt), nil + }, + types.String, + "Prettifies a statement using the default pretty-printing config.", + tree.VolatilityImmutable, + ), + tree.Overload{ + Types: tree.ArgTypes{ + {"statement", types.String}, + {"line_width", types.Int}, + {"align_mode", types.Int}, + {"case_mode", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.String), + Fn: func(_ *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + stmt := string(tree.MustBeDString(args[0])) + lineWidth := int(tree.MustBeDInt(args[1])) + alignMode := 
int(tree.MustBeDInt(args[2])) + caseMode := int(tree.MustBeDInt(args[3])) + formattedStmt, err := prettyStatementCustomConfig(stmt, lineWidth, alignMode, caseMode) + if err != nil { + return nil, err + } + return tree.NewDString(formattedStmt), nil + }, + Info: "Prettifies a statement using a user-configured pretty-printing config.\n" + + "Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively.\n" + + "Case mode values range between 0 - 1, representing lower casing and upper casing respectively.", + Volatility: tree.VolatilityImmutable, + }, + ), + "substr": substringImpls, "substring": substringImpls, @@ -4022,7 +4062,7 @@ value if you rely on the HLC for accuracy.`, var out []byte for i, arg := range args { var err error - out, err = rowenc.EncodeTableKey(out, arg, encoding.Ascending) + out, err = keyside.Encode(out, arg, encoding.Ascending) if err != nil { return nil, pgerror.Newf( pgcode.DatatypeMismatch, @@ -4749,8 +4789,8 @@ value if you rely on the HLC for accuracy.`, colMap.Set(id, i) } // Finally, encode the index key using the provided datums. - keyPrefix := rowenc.MakeIndexKeyPrefix(ctx.Codec, tableDesc, index.GetID()) - res, _, err := rowenc.EncodePartialIndexKey(tableDesc, index, len(datums), colMap, datums, keyPrefix) + keyPrefix := rowenc.MakeIndexKeyPrefix(ctx.Codec, tableDesc.GetID(), index.GetID()) + res, _, err := rowenc.EncodePartialIndexKey(index, len(datums), colMap, datums, keyPrefix) if err != nil { return nil, err } @@ -8835,3 +8875,34 @@ func parseContextFromDateStyle( tree.NewParseTimeContextOptionDateStyle(ds), ), nil } + +func prettyStatementCustomConfig( + stmt string, lineWidth int, alignMode int, caseSetting int, +) (string, error) { + cfg := tree.DefaultPrettyCfg() + cfg.LineWidth = lineWidth + cfg.Align = tree.PrettyAlignMode(alignMode) + caseMode := tree.CaseMode(caseSetting) + if caseMode == tree.LowerCase { + cfg.Case = func(str string) string { return strings.ToLower(str) } + } else if caseMode == tree.UpperCase { + cfg.Case = func(str string) string { return strings.ToUpper(str) } + } + return prettyStatement(cfg, stmt) +} + +func prettyStatement(p tree.PrettyCfg, stmt string) (string, error) { + stmts, err := parser.Parse(stmt) + if err != nil { + return "", err + } + var formattedStmt strings.Builder + for idx := range stmts { + formattedStmt.WriteString(p.Pretty(stmts[idx].AST)) + if len(stmts) > 1 { + formattedStmt.WriteString(";") + } + formattedStmt.WriteString("\n") + } + return formattedStmt.String(), nil +} diff --git a/pkg/sql/sem/builtins/math_builtins.go b/pkg/sql/sem/builtins/math_builtins.go index b439a469e953..05afd4baa7cc 100644 --- a/pkg/sql/sem/builtins/math_builtins.go +++ b/pkg/sql/sem/builtins/math_builtins.go @@ -14,7 +14,7 @@ import ( "math" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" diff --git a/pkg/sql/sem/builtins/pg_builtins.go b/pkg/sql/sem/builtins/pg_builtins.go index 6c1de28e7025..53542f056f71 100644 --- a/pkg/sql/sem/builtins/pg_builtins.go +++ b/pkg/sql/sem/builtins/pg_builtins.go @@ -24,12 +24,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + 
"github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/ipaddr" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" @@ -492,50 +491,8 @@ func getTableNameForArg(ctx *tree.EvalContext, arg tree.Datum) (*tree.TableName, } } -// priv represents a privilege parsed from an Access Privilege Inquiry -// Function's privilege string argument. The structure is distinct from -// privilege.Kind due to differences in how PostgreSQL and CockroachDB -// handle the GRANT privilege. -// -// In PostgreSQL, each privilege (SELECT, INSERT, etc.) has an optional -// "grant option" bit associated with it. A role can only grant a privilege -// on an object to others if it is the owner of the object or if it itself -// holds that privilege **with grant option** on the object. With this -// construction, there is no need for a separate GRANT privilege. -// -// In CockroachDB, there exists a distinct GRANT privilege and no concept of -// a "grant option" on other privileges. A role can only grant a privilege -// on an object to others if it is the owner of the object or if it itself -// holds both (1) that privilege on the object and (2) the GRANT privilege -// on the object. However, this behavior may change in the future, see -// https://github.com/cockroachdb/cockroach/issues/67410. -// -// For the sake of parsing the privilege argument of these builtins, it is -// helpful to represent privileges more closely to how they are represented -// in PostgreSQL. This allows us to represent a single priv with a fake -// "grant option", which is later computed as a conjunction between that -// priv's kind and the GRANT privilege, while also computing a disjunction -// across all comma-separated privilege strings. -// -// For instance, consider the following argument string: -// -// arg = "SELECT, UPDATE WITH GRANT OPTION, DELETE" -// -// This would be represented as the following list of priv structs: -// -// privs = []priv{{SELECT, false}, {UPDATE, true}, {DELETE, false}} -// -// Which would be evaluated as: -// -// res = check(SELECT) || (check(UPDATE) && check(GRANT)) || check(DELETE) -// -type priv struct { - kind privilege.Kind - grantOption bool -} - -// privMap maps a privilege string to a priv. -type privMap map[string]priv +// privMap maps a privilege string to a Privilege. +type privMap map[string]privilege.Privilege // parsePrivilegeStr recognizes privilege strings for has_foo_privilege // builtins, which are known as Access Privilege Inquiry Functions. @@ -544,10 +501,10 @@ type privMap map[string]priv // names, producing a list of privileges. It is liberal about whitespace between // items, not so much about whitespace within items. The allowed privilege names // and their corresponding privileges are given as a privMap. -func parsePrivilegeStr(arg tree.Datum, m privMap) ([]priv, error) { +func parsePrivilegeStr(arg tree.Datum, m privMap) ([]privilege.Privilege, error) { argStr := string(tree.MustBeDString(arg)) privStrs := strings.Split(argStr, ",") - res := make([]priv, len(privStrs)) + res := make([]privilege.Privilege, len(privStrs)) for i, privStr := range privStrs { // Privileges are case-insensitive. 
privStr = strings.ToUpper(privStr) @@ -568,7 +525,7 @@ func parsePrivilegeStr(arg tree.Datum, m privMap) ([]priv, error) { // If any of the checks return True or NULL, the function short-circuits with // that result. Otherwise, it returns False. func runPrivilegeChecks( - privs []priv, check func(privilege.Kind) (tree.Datum, error), + privs []privilege.Privilege, check func(privilege.Privilege) (tree.Datum, error), ) (tree.Datum, error) { for _, p := range privs { d, err := runSinglePrivilegeCheck(p, check) @@ -583,22 +540,24 @@ func runPrivilegeChecks( } // runSinglePrivilegeCheck runs the provided check function for the privilege. -// If the privilege has the grantOption flag set to true, it also runs the +// If the privilege has the GrantOption flag set to true, it also runs the // provided function with the GRANT privilege and only returns True if both -// calls returns True. See the comment on priv for justification. +// calls returns True. See the comment on Privilege for justification. func runSinglePrivilegeCheck( - priv priv, check func(privilege.Kind) (tree.Datum, error), + priv privilege.Privilege, check func(privilege.Privilege) (tree.Datum, error), ) (tree.Datum, error) { - d, err := check(priv.kind) + d, err := check(priv) if err != nil { return nil, err } switch d { case tree.DBoolFalse, tree.DNull: case tree.DBoolTrue: - if priv.grantOption { - // grantOption is set, so AND the result with check(GRANT). - d, err = check(privilege.GRANT) + // todo remove this check after migrating from evalPrivilegeCheck to hasPrivilege + // https://github.com/cockroachdb/cockroach/issues/66173 + if priv.GrantOption { + // GrantOption is set, so AND the result with check(GRANT). + d, err = check(privilege.Privilege{Kind: privilege.GRANT}) if err != nil { return nil, err } @@ -1488,20 +1447,21 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "SELECT": {privilege.SELECT, false}, - "SELECT WITH GRANT OPTION": {privilege.SELECT, true}, - "INSERT": {privilege.INSERT, false}, - "INSERT WITH GRANT OPTION": {privilege.INSERT, true}, - "UPDATE": {privilege.UPDATE, false}, - "UPDATE WITH GRANT OPTION": {privilege.UPDATE, true}, - "REFERENCES": {privilege.SELECT, false}, - "REFERENCES WITH GRANT OPTION": {privilege.SELECT, true}, + "SELECT": {Kind: privilege.SELECT}, + "SELECT WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "INSERT": {Kind: privilege.INSERT}, + "INSERT WITH GRANT OPTION": {Kind: privilege.INSERT, GrantOption: true}, + "UPDATE": {Kind: privilege.UPDATE}, + "UPDATE WITH GRANT OPTION": {Kind: privilege.UPDATE, GrantOption: true}, + "REFERENCES": {Kind: privilege.SELECT}, + "REFERENCES WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, }) if err != nil { return nil, err } - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { - return hasPrivilege(ctx, specifier, user, priv) + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { + ret, err := ctx.Planner.HasPrivilege(ctx.Context, specifier, user, priv) + return handleTableHasPrivilegeError(specifier, ret, err) }) }, ), @@ -1511,40 +1471,28 @@ SELECT description argTypeOpts{{"table", strOrOidTypes}, {"column", []*types.T{types.String, types.Int}}}, func(ctx *tree.EvalContext, args tree.Datums, user security.SQLUsername) (tree.Datum, error) { tableArg := tree.UnwrapDatum(ctx, args[0]) - specifier, err := tableHasPrivilegeSpecifier(tableArg) + colArg := tree.UnwrapDatum(ctx, args[1]) + specifier, err := 
columnHasPrivilegeSpecifier(tableArg, colArg) if err != nil { return nil, err } - // Note that we only verify the column exists for has_column_privilege. - colArg := tree.UnwrapDatum(ctx, args[1]) - switch t := colArg.(type) { - case *tree.DString: - // When colArg is a string, it specifies the attribute name. - n := tree.Name(*t) - specifier.ColumnName = &n - case *tree.DInt: - // When colArg is an integer, it specifies the attribute number. - attNum := uint32(*t) - specifier.ColumnAttNum = &attNum - default: - return nil, errors.AssertionFailedf("unexpected arg type %T", t) - } privs, err := parsePrivilegeStr(args[2], privMap{ - "SELECT": {privilege.SELECT, false}, - "SELECT WITH GRANT OPTION": {privilege.SELECT, true}, - "INSERT": {privilege.INSERT, false}, - "INSERT WITH GRANT OPTION": {privilege.INSERT, true}, - "UPDATE": {privilege.UPDATE, false}, - "UPDATE WITH GRANT OPTION": {privilege.UPDATE, true}, - "REFERENCES": {privilege.SELECT, false}, - "REFERENCES WITH GRANT OPTION": {privilege.SELECT, true}, + "SELECT": {Kind: privilege.SELECT}, + "SELECT WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "INSERT": {Kind: privilege.INSERT}, + "INSERT WITH GRANT OPTION": {Kind: privilege.INSERT, GrantOption: true}, + "UPDATE": {Kind: privilege.UPDATE}, + "UPDATE WITH GRANT OPTION": {Kind: privilege.UPDATE, GrantOption: true}, + "REFERENCES": {Kind: privilege.SELECT}, + "REFERENCES WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, }) if err != nil { return nil, err } - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { - return hasPrivilege(ctx, specifier, user, priv) + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { + ret, err := ctx.Planner.HasPrivilege(ctx.Context, specifier, user, priv) + return handleTableHasPrivilegeError(specifier, ret, err) }) }, ), @@ -1553,44 +1501,30 @@ SELECT description "database", argTypeOpts{{"database", strOrOidTypes}}, func(ctx *tree.EvalContext, args tree.Datums, user security.SQLUsername) (tree.Datum, error) { - dbArg := tree.UnwrapDatum(ctx, args[0]) - db, err := getNameForArg(ctx, dbArg, "pg_database", "datname") + + databaseArg := tree.UnwrapDatum(ctx, args[0]) + specifier, err := databaseHasPrivilegeSpecifier(databaseArg) if err != nil { return nil, err } - retNull := false - if db == "" { - switch dbArg.(type) { - case *tree.DString: - return nil, pgerror.Newf(pgcode.InvalidCatalogName, - "database %s does not exist", dbArg) - case *tree.DOid: - // Postgres returns NULL if no matching language is found - // when given an OID. 
- retNull = true - } - } privs, err := parsePrivilegeStr(args[1], privMap{ - "CREATE": {privilege.CREATE, false}, - "CREATE WITH GRANT OPTION": {privilege.CREATE, true}, - "CONNECT": {privilege.CONNECT, false}, - "CONNECT WITH GRANT OPTION": {privilege.CONNECT, true}, - "TEMPORARY": {privilege.CREATE, false}, - "TEMPORARY WITH GRANT OPTION": {privilege.CREATE, true}, - "TEMP": {privilege.CREATE, false}, - "TEMP WITH GRANT OPTION": {privilege.CREATE, true}, + "CREATE": {Kind: privilege.CREATE}, + "CREATE WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, + "CONNECT": {Kind: privilege.CONNECT}, + "CONNECT WITH GRANT OPTION": {Kind: privilege.CONNECT, GrantOption: true}, + "TEMPORARY": {Kind: privilege.CREATE}, + "TEMPORARY WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, + "TEMP": {Kind: privilege.CREATE}, + "TEMP WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, }) if err != nil { return nil, err } - if retNull { - return tree.DNull, nil - } - databasePrivilegePred := fmt.Sprintf("database_name = '%s'", db) - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { - return evalPrivilegeCheck(ctx, `"".crdb_internal`, "cluster_database_privileges", - user, databasePrivilegePred, priv) + + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { + ret, err := ctx.Planner.HasPrivilege(ctx.Context, specifier, user, priv) + return handleDatabaseHasPrivilegeError(specifier, ret, err) }) }, ), @@ -1618,8 +1552,8 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "USAGE": {privilege.USAGE, false}, - "USAGE WITH GRANT OPTION": {privilege.USAGE, true}, + "USAGE": {Kind: privilege.USAGE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -1667,8 +1601,8 @@ SELECT description // TODO(nvanbenschoten): this privilege is incorrect, but we don't // currently have an EXECUTE privilege and we aren't even checking // this down below, so it's fine for now. 
- "EXECUTE": {privilege.USAGE, false}, - "EXECUTE WITH GRANT OPTION": {privilege.USAGE, true}, + "EXECUTE": {Kind: privilege.USAGE}, + "EXECUTE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -1705,8 +1639,8 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "USAGE": {privilege.USAGE, false}, - "USAGE WITH GRANT OPTION": {privilege.USAGE, true}, + "USAGE": {Kind: privilege.USAGE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -1747,10 +1681,10 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "CREATE": {privilege.CREATE, false}, - "CREATE WITH GRANT OPTION": {privilege.CREATE, true}, - "USAGE": {privilege.USAGE, false}, - "USAGE WITH GRANT OPTION": {privilege.USAGE, true}, + "CREATE": {Kind: privilege.CREATE}, + "CREATE WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, + "USAGE": {Kind: privilege.USAGE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -1760,9 +1694,9 @@ SELECT description } pred := fmt.Sprintf("table_catalog = '%s' AND table_schema = '%s'", ctx.SessionData().Database, schema) - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { return evalPrivilegeCheck(ctx, "information_schema", "schema_privileges", - user, pred, priv) + user, pred, priv.Kind) }) }, ), @@ -1805,12 +1739,12 @@ SELECT description privs, err := parsePrivilegeStr(args[1], privMap{ // Sequences and other table objects cannot be given a USAGE privilege, // so we check for SELECT here instead. See privilege.TablePrivileges. 
- "USAGE": {privilege.SELECT, false}, - "USAGE WITH GRANT OPTION": {privilege.SELECT, true}, - "SELECT": {privilege.SELECT, false}, - "SELECT WITH GRANT OPTION": {privilege.SELECT, true}, - "UPDATE": {privilege.UPDATE, false}, - "UPDATE WITH GRANT OPTION": {privilege.UPDATE, true}, + "USAGE": {Kind: privilege.SELECT}, + "USAGE WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "SELECT": {Kind: privilege.SELECT}, + "SELECT WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "UPDATE": {Kind: privilege.UPDATE}, + "UPDATE WITH GRANT OPTION": {Kind: privilege.UPDATE, GrantOption: true}, }) if err != nil { return nil, err @@ -1818,9 +1752,9 @@ SELECT description if retNull { return tree.DNull, nil } - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { return evalPrivilegeCheck(ctx, "information_schema", "table_privileges", - user, pred, priv) + user, pred, priv.Kind) }) }, ), @@ -1848,8 +1782,8 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "USAGE": {privilege.USAGE, false}, - "USAGE WITH GRANT OPTION": {privilege.USAGE, true}, + "USAGE": {Kind: privilege.USAGE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -1874,28 +1808,29 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "SELECT": {privilege.SELECT, false}, - "SELECT WITH GRANT OPTION": {privilege.SELECT, true}, - "INSERT": {privilege.INSERT, false}, - "INSERT WITH GRANT OPTION": {privilege.INSERT, true}, - "UPDATE": {privilege.UPDATE, false}, - "UPDATE WITH GRANT OPTION": {privilege.UPDATE, true}, - "DELETE": {privilege.DELETE, false}, - "DELETE WITH GRANT OPTION": {privilege.DELETE, true}, - "TRUNCATE": {privilege.DELETE, false}, - "TRUNCATE WITH GRANT OPTION": {privilege.DELETE, true}, - "REFERENCES": {privilege.SELECT, false}, - "REFERENCES WITH GRANT OPTION": {privilege.SELECT, true}, - "TRIGGER": {privilege.CREATE, false}, - "TRIGGER WITH GRANT OPTION": {privilege.CREATE, true}, - "RULE": {privilege.RULE, false}, - "RULE WITH GRANT OPTION": {privilege.RULE, true}, + "SELECT": {Kind: privilege.SELECT}, + "SELECT WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "INSERT": {Kind: privilege.INSERT}, + "INSERT WITH GRANT OPTION": {Kind: privilege.INSERT, GrantOption: true}, + "UPDATE": {Kind: privilege.UPDATE}, + "UPDATE WITH GRANT OPTION": {Kind: privilege.UPDATE, GrantOption: true}, + "DELETE": {Kind: privilege.DELETE}, + "DELETE WITH GRANT OPTION": {Kind: privilege.DELETE, GrantOption: true}, + "TRUNCATE": {Kind: privilege.DELETE}, + "TRUNCATE WITH GRANT OPTION": {Kind: privilege.DELETE, GrantOption: true}, + "REFERENCES": {Kind: privilege.SELECT}, + "REFERENCES WITH GRANT OPTION": {Kind: privilege.SELECT, GrantOption: true}, + "TRIGGER": {Kind: privilege.CREATE}, + "TRIGGER WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, + "RULE": {Kind: privilege.RULE}, + "RULE WITH GRANT OPTION": {Kind: privilege.RULE, GrantOption: true}, }) if err != nil { return nil, err } - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { - return hasPrivilege(ctx, specifier, user, priv) + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { + ret, err := ctx.Planner.HasPrivilege(ctx.Context, specifier, user, priv) + return handleTableHasPrivilegeError(specifier, ret, err) }) }, ), @@ -1923,8 +1858,8 @@ 
SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "CREATE": {privilege.CREATE, false}, - "CREATE WITH GRANT OPTION": {privilege.CREATE, true}, + "CREATE": {Kind: privilege.CREATE}, + "CREATE WITH GRANT OPTION": {Kind: privilege.CREATE, GrantOption: true}, }) if err != nil { return nil, err @@ -1969,8 +1904,8 @@ SELECT description } privs, err := parsePrivilegeStr(args[1], privMap{ - "USAGE": {privilege.USAGE, false}, - "USAGE WITH GRANT OPTION": {privilege.USAGE, true}, + "USAGE": {Kind: privilege.USAGE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.USAGE, GrantOption: true}, }) if err != nil { return nil, err @@ -2016,12 +1951,12 @@ SELECT description // the role are accessible (hasPrivsOfRole), CREATE to denote whether // the user is a member of the role (isMemberOfRole), and GRANT to // denote whether the user is an admin of the role (isAdminOfRole). - "USAGE": {privilege.USAGE, false}, - "MEMBER": {privilege.CREATE, false}, - "USAGE WITH GRANT OPTION": {privilege.GRANT, false}, - "USAGE WITH ADMIN OPTION": {privilege.GRANT, false}, - "MEMBER WITH GRANT OPTION": {privilege.GRANT, false}, - "MEMBER WITH ADMIN OPTION": {privilege.GRANT, false}, + "USAGE": {Kind: privilege.USAGE}, + "MEMBER": {Kind: privilege.CREATE}, + "USAGE WITH GRANT OPTION": {Kind: privilege.GRANT}, + "USAGE WITH ADMIN OPTION": {Kind: privilege.GRANT}, + "MEMBER WITH GRANT OPTION": {Kind: privilege.GRANT}, + "MEMBER WITH ADMIN OPTION": {Kind: privilege.GRANT}, }) if err != nil { return nil, err @@ -2029,8 +1964,8 @@ SELECT description if retNull { return tree.DNull, nil } - return runPrivilegeChecks(privs, func(priv privilege.Kind) (tree.Datum, error) { - switch priv { + return runPrivilegeChecks(privs, func(priv privilege.Privilege) (tree.Datum, error) { + switch priv.Kind { case privilege.USAGE: return hasPrivsOfRole(ctx, user, role) case privilege.CREATE: @@ -2165,7 +2100,7 @@ SELECT description Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { var totalSize int for _, arg := range args { - encodeTableValue, err := rowenc.EncodeTableValue(nil, descpb.ColumnID(encoding.NoColumnID), arg, nil) + encodeTableValue, err := valueside.Encode(nil, valueside.NoColumnID, arg, nil) if err != nil { return tree.DNull, err } @@ -2310,7 +2245,7 @@ SELECT description if typmod != -1 { // This logics matches the postgres implementation // of how to calculate the precision based on the typmod - //https://github.com/postgres/postgres/blob/d84ffffe582b8e036a14c6bc2378df29167f3a00/src/backend/catalog/information_schema.sql#L109 + // https://github.com/postgres/postgres/blob/d84ffffe582b8e036a14c6bc2378df29167f3a00/src/backend/catalog/information_schema.sql#L109 return tree.NewDInt(((typmod - 4) >> 16) & 65535), nil } return tree.DNull, nil @@ -2455,27 +2390,19 @@ SELECT description return r[0], nil } -// hasPrivilege returns whether the given specifier has the given privilege. -func hasPrivilege( - ctx *tree.EvalContext, - specifier tree.HasPrivilegeSpecifier, - user security.SQLUsername, - kind privilege.Kind, -) (tree.Datum, error) { - ret, err := ctx.Planner.HasPrivilege( - ctx.Context, - specifier, - user, - kind, - ) - if err != nil { - // When an OID is specified and the relation is not found, we return NULL. 
- if specifier.TableOID != nil && sqlerrors.IsUndefinedRelationError(err) { - return tree.DNull, nil - } - return nil, err +func databaseHasPrivilegeSpecifier(databaseArg tree.Datum) (tree.HasPrivilegeSpecifier, error) { + var specifier tree.HasPrivilegeSpecifier + switch t := databaseArg.(type) { + case *tree.DString: + s := string(*t) + specifier.DatabaseName = &s + case *tree.DOid: + oid := oid.Oid(t.DInt) + specifier.DatabaseOID = &oid + default: + return specifier, errors.AssertionFailedf("unknown privilege specifier: %#v", databaseArg) } - return tree.MakeDBool(tree.DBool(ret)), nil + return specifier, nil } // tableHasPrivilegeSpecifier returns the HasPrivilegeSpecifier for @@ -2495,6 +2422,53 @@ func tableHasPrivilegeSpecifier(tableArg tree.Datum) (tree.HasPrivilegeSpecifier return specifier, nil } +// Note that we only verify the column exists for has_column_privilege. +func columnHasPrivilegeSpecifier( + tableArg tree.Datum, colArg tree.Datum, +) (tree.HasPrivilegeSpecifier, error) { + specifier, err := tableHasPrivilegeSpecifier(tableArg) + if err != nil { + return specifier, err + } + switch t := colArg.(type) { + case *tree.DString: + n := tree.Name(*t) + specifier.ColumnName = &n + case *tree.DInt: + attNum := uint32(*t) + specifier.ColumnAttNum = &attNum + default: + return specifier, errors.AssertionFailedf("unexpected arg type %T", t) + } + return specifier, nil +} + +func handleDatabaseHasPrivilegeError( + specifier tree.HasPrivilegeSpecifier, ret bool, err error, +) (tree.Datum, error) { + if err != nil { + // When a DatabaseOID is specified and the relation is not found, we return NULL. + if specifier.DatabaseOID != nil && sqlerrors.IsUndefinedDatabaseError(err) { + return tree.DNull, nil + } + return nil, err + } + return tree.MakeDBool(tree.DBool(ret)), nil +} + +func handleTableHasPrivilegeError( + specifier tree.HasPrivilegeSpecifier, ret bool, err error, +) (tree.Datum, error) { + if err != nil { + // When a TableOID is specified and the relation is not found, we return NULL. 
+ if specifier.TableOID != nil && sqlerrors.IsUndefinedRelationError(err) { + return tree.DNull, nil + } + return nil, err + } + return tree.MakeDBool(tree.DBool(ret)), nil +} + func pgTrueTypImpl(attrField, typField string, retType *types.T) builtinDefinition { return makeBuiltin(defProps(), tree.Overload{ diff --git a/pkg/sql/sem/builtins/replication_builtins.go b/pkg/sql/sem/builtins/replication_builtins.go index 1e4dd805dd16..7811ea767f1c 100644 --- a/pkg/sql/sem/builtins/replication_builtins.go +++ b/pkg/sql/sem/builtins/replication_builtins.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/streaming" "github.com/cockroachdb/cockroach/pkg/util/hlc" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" ) func initReplicationBuiltins() { @@ -112,7 +113,7 @@ var replicationBuiltins = map[string]builtinDefinition{ {"stream_id", types.Int}, {"frontier_ts", types.String}, }, - ReturnType: tree.FixedReturnType(types.String), + ReturnType: tree.FixedReturnType(types.Bytes), Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { mgr, err := streaming.GetReplicationStreamManager(evalCtx) if err != nil { @@ -123,11 +124,15 @@ var replicationBuiltins = map[string]builtinDefinition{ return nil, err } streamID := streaming.StreamID(int(tree.MustBeDInt(args[0]))) - pts, err := mgr.UpdateReplicationStreamProgress(evalCtx, streamID, frontier, evalCtx.Txn) + sps, err := mgr.UpdateReplicationStreamProgress(evalCtx, streamID, frontier, evalCtx.Txn) if err != nil { return nil, err } - return tree.NewDString(pts.String()), nil + rawStatus, err := protoutil.Marshal(&sps) + if err != nil { + return nil, err + } + return tree.NewDBytes(tree.DBytes(rawStatus)), nil }, Info: "This function can be used on the consumer side to heartbeat its replication progress to " + "a replication stream in the source cluster. The returns a StreamReplicationStatus message " + @@ -165,4 +170,38 @@ var replicationBuiltins = map[string]builtinDefinition{ tree.VolatilityVolatile, ), ), + + "crdb_internal.replication_stream_spec": makeBuiltin( + tree.FunctionProperties{ + Category: categoryStreamIngestion, + DistsqlBlocklist: true, + }, + tree.Overload{ + Types: tree.ArgTypes{ + {"stream_id", types.Int}, + }, + ReturnType: tree.FixedReturnType(types.Bytes), + Fn: func(evalCtx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { + mgr, err := streaming.GetReplicationStreamManager(evalCtx) + if err != nil { + return nil, err + } + + streamID := int64(tree.MustBeDInt(args[0])) + spec, err := mgr.GetReplicationStreamSpec(evalCtx, evalCtx.Txn, streaming.StreamID(streamID)) + if err != nil { + return nil, err + } + rawSpec, err := protoutil.Marshal(spec) + if err != nil { + return nil, err + } + return tree.NewDBytes(tree.DBytes(rawSpec)), err + }, + Info: "This function can be used on the consumer side to get a replication stream specification " + + "for the specified stream starting from the specified 'start_from' timestamp. 
The consumer will " + + "later call 'stream_partition' to a partition with the spec to start streaming.", + Volatility: tree.VolatilityVolatile, + }, + ), } diff --git a/pkg/sql/sem/builtins/window_frame_builtins.go b/pkg/sql/sem/builtins/window_frame_builtins.go index f54138605d8c..0eff809d0bf0 100644 --- a/pkg/sql/sem/builtins/window_frame_builtins.go +++ b/pkg/sql/sem/builtins/window_frame_builtins.go @@ -15,7 +15,7 @@ import ( "fmt" "strings" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/duration" "github.com/cockroachdb/cockroach/pkg/util/ring" diff --git a/pkg/sql/sem/catid/BUILD.bazel b/pkg/sql/sem/catid/BUILD.bazel new file mode 100644 index 000000000000..f45d54421ea7 --- /dev/null +++ b/pkg/sql/sem/catid/BUILD.bazel @@ -0,0 +1,8 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "catid", + srcs = ["ids.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/sem/catid", + visibility = ["//visibility:public"], +) diff --git a/pkg/sql/sem/catid/ids.go b/pkg/sql/sem/catid/ids.go new file mode 100644 index 000000000000..ef1c3f95a19b --- /dev/null +++ b/pkg/sql/sem/catid/ids.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package catid is a low-level package exporting ID types. +package catid + +// DescID is a custom type for {Database,Table}Descriptor IDs. +type DescID uint32 + +// SafeValue implements the redact.SafeValue interface. +func (DescID) SafeValue() {} + +// ColumnID is a custom type for Column IDs. +type ColumnID uint32 + +// SafeValue implements the redact.SafeValue interface. +func (ColumnID) SafeValue() {} + +// FamilyID is a custom type for ColumnFamilyDescriptor IDs. +type FamilyID uint32 + +// SafeValue implements the redact.SafeValue interface. +func (FamilyID) SafeValue() {} + +// IndexID is a custom type for IndexDescriptor IDs. +type IndexID uint32 + +// SafeValue implements the redact.SafeValue interface. 
+func (IndexID) SafeValue() {} diff --git a/pkg/sql/sem/tree/BUILD.bazel b/pkg/sql/sem/tree/BUILD.bazel index fea320bbabc3..b2a614beff07 100644 --- a/pkg/sql/sem/tree/BUILD.bazel +++ b/pkg/sql/sem/tree/BUILD.bazel @@ -41,6 +41,7 @@ go_library( "create.go", "data_placement.go", "datum.go", + "datum_alloc.go", "decimal.go", "delete.go", "discard.go", @@ -138,6 +139,7 @@ go_library( "//pkg/sql/pgwire/pgnotice", "//pkg/sql/privilege", "//pkg/sql/roleoption", + "//pkg/sql/sem/catid", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", "//pkg/sql/sqlliveness", @@ -166,7 +168,7 @@ go_library( "//pkg/util/tracing", "//pkg/util/uint128", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_redact//:redact", "@com_github_google_go_cmp//cmp", @@ -255,8 +257,9 @@ go_test( "//pkg/util/timetz", "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_datadriven//:datadriven", + "@com_github_cockroachdb_errors//:errors", "@com_github_lib_pq//oid", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/sql/sem/tree/as_of.go b/pkg/sql/sem/tree/as_of.go index 55ee64aa7dbe..f399f410d37c 100644 --- a/pkg/sql/sem/tree/as_of.go +++ b/pkg/sql/sem/tree/as_of.go @@ -17,7 +17,7 @@ import ( "strings" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" diff --git a/pkg/sql/sem/tree/cast.go b/pkg/sql/sem/tree/cast.go index da7082315ed0..671eb7a4a5e0 100644 --- a/pkg/sql/sem/tree/cast.go +++ b/pkg/sql/sem/tree/cast.go @@ -12,13 +12,12 @@ package tree import ( "math" - "math/big" "strconv" "strings" "time" "unicode/utf8" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -67,6 +66,20 @@ const ( CastContextImplicit ) +// String returns the representation of CastContext as a string. +func (cc CastContext) String() string { + switch cc { + case CastContextExplicit: + return "explicit" + case CastContextAssignment: + return "assignment" + case CastContextImplicit: + return "implicit" + default: + return "invalid" + } +} + // contextOrigin indicates the source of information for a cast's maximum // context (see cast.maxContext below). It is only used to annotate entries in // castMap and to perform assertions on cast entries in the init function. It @@ -90,10 +103,10 @@ const ( // [1] https://www.postgresql.org/docs/13/catalog-pg-cast.html#CATALOG-PG-CAST // [2] https://www.postgresql.org/docs/13/sql-createcast.html#SQL-CREATECAST-NOTES contextOriginAutomaticIOConversion - // contextLegacyConversion is used for casts that are not supported by + // contextOriginLegacyConversion is used for casts that are not supported by // Postgres, but are supported by CockroachDB and continue to be supported // for backwards compatibility. - contextLegacyConversion + contextOriginLegacyConversion ) // cast includes details about a cast from one OID to another. 
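For reference while reading the cast.go hunks below: castMap maps a source OID to a target OID to a cast struct carrying the maximum cast context, its origin, and its volatility. A minimal, self-contained sketch of that two-level lookup pattern follows, with hypothetical names and string keys standing in for oid.Oid; it is illustrative only and not part of the patch.

```
package main

import "fmt"

// castInfo mirrors the shape of the cast entries in castMap: the maximum
// context in which the cast may be used, plus its volatility.
type castInfo struct {
	maxContext string
	volatility string
}

// castTable uses the same two-level layout as castMap (source type, then
// target type), with strings standing in for oid.Oid keys.
var castTable = map[string]map[string]castInfo{
	"bool": {
		"int2": {maxContext: "explicit", volatility: "immutable"},
		"text": {maxContext: "assignment", volatility: "immutable"},
	},
}

// lookupCast returns the entry for a src -> tgt cast and whether one exists.
func lookupCast(src, tgt string) (castInfo, bool) {
	targets, ok := castTable[src]
	if !ok {
		return castInfo{}, false
	}
	c, ok := targets[tgt]
	return c, ok
}

func main() {
	if c, ok := lookupCast("bool", "int2"); ok {
		fmt.Printf("bool -> int2 allowed up to %s context (%s)\n", c.maxContext, c.volatility)
	}
	if _, ok := lookupCast("bool", "date"); !ok {
		fmt.Println("bool -> date: no cast defined")
	}
}
```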
@@ -131,13 +144,6 @@ type cast struct { dateStyleAffected bool } -// volatilityTODO is used temporarily to indicate that cast's volatility has not -// yet been configured in castMap, and the volatility should be retrieved from -// validCasts. -// -// TODO(mgartner): Configure all volatilities in castMap and remove validCasts. -const volatilityTODO = VolatilityLeakProof - 1 - // castMap defines valid casts. It maps from a source OID to a target OID to a // cast struct that contains information about the cast. Some possible casts, // such as casts from the UNKNOWN type and casts from a type to the identical @@ -152,6 +158,7 @@ const volatilityTODO = VolatilityLeakProof - 1 var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_bit: { oid.T_bit: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_varbit: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -164,12 +171,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, oid.T_bool: { oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_float4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_float8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int2: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_int8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_numeric: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_numeric: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_text: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
@@ -261,9 +268,9 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_void: {maxContext: CastContextExplicit, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_bytea: { - oid.T_uuid: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, oidext.T_geography: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oidext.T_geometry: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, + oid.T_uuid: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. // TODO(mgartner): Cast from BYTES to string types should be immutable. oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, @@ -348,12 +355,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_void: {maxContext: CastContextExplicit, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_date: { - oid.T_float4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_float8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int2: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_numeric: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_numeric: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_timestamp: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_timestamptz: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityStable}, // Automatic I/O conversions to string types. 
@@ -404,12 +411,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, }, oid.T_float4: { - oid.T_bool: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. // TODO(mgartner): Cast from FLOAT4 to string types should be immutable. @@ -420,12 +427,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_float8: { - oid.T_bool: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. // TODO(mgartner): Cast from FLOAT8 to string types should be immutable. @@ -439,7 +446,7 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_bytea: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oidext.T_geography: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oidext.T_geometry: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_jsonb: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_jsonb: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, oid.T_char: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -469,14 +476,14 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_name: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_int2: { - oid.T_bit: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_bool: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_date: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bit: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_date: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regclass: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -485,8 +492,9 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_regprocedure: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regrole: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regtype: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_varbit: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, oid.T_char: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -498,12 +506,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_bit: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_char: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_date: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_date: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regclass: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -512,8 +520,9 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_regprocedure: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regrole: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regtype: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_varbit: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, oid.T_name: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -522,13 +531,13 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, oid.T_int8: { oid.T_bit: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_bool: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_date: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_date: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regclass: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -537,8 +546,9 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_regprocedure: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regrole: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regtype: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamp: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_timestamptz: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_varbit: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, oid.T_char: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -547,13 +557,13 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_interval: { - oid.T_float4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_float8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int2: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_interval: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_numeric: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_numeric: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_time: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: { @@ -596,8 +606,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oidext.T_geography: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oidext.T_geometry: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oidext.T_geography: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oidext.T_geometry: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -685,13 +695,13 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_void: {maxContext: CastContextExplicit, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_numeric: { - oid.T_bool: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bool: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_float4: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_float8: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_interval: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_interval: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_numeric: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -701,6 +711,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_oid: { + // TODO(mgartner): Casts to INT2 should not be allowed. 
+ oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_regclass: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -725,6 +737,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regclass: { + // TODO(mgartner): Casts to INT2 should not be allowed. + oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -736,6 +750,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regnamespace: { + // TODO(mgartner): Casts to INT2 should not be allowed. + oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -747,6 +763,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regproc: { + // TODO(mgartner): Casts to INT2 should not be allowed. + oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -759,6 +777,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regprocedure: { + // TODO(mgartner): Casts to INT2 should not be allowed. 
+ oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -771,6 +791,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regrole: { + // TODO(mgartner): Casts to INT2 should not be allowed. + oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -782,6 +804,8 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityStable}, }, oid.T_regtype: { + // TODO(mgartner): Casts to INT2 should not be allowed. + oid.T_int2: {maxContext: CastContextAssignment, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_int4: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_int8: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_oid: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, @@ -891,12 +915,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, oid.T_timestamp: { oid.T_date: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, - oid.T_float4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_float8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int2: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_numeric: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_numeric: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_time: {maxContext: 
CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_timestamp: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, oid.T_timestamptz: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityStable}, @@ -949,12 +973,12 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, oid.T_timestamptz: { oid.T_date: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityStable}, - oid.T_float4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_float8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int2: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int4: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_int8: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, - oid.T_numeric: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_float8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_numeric: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_time: {maxContext: CastContextAssignment, origin: contextOriginPgCast, volatility: VolatilityStable}, oid.T_timestamp: { maxContext: CastContextAssignment, @@ -1012,7 +1036,7 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ oid.T_varchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, }, oid.T_uuid: { - oid.T_bytea: {maxContext: CastContextExplicit, origin: contextLegacyConversion, volatility: VolatilityImmutable}, + oid.T_bytea: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. 
oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, oid.T_char: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -1022,6 +1046,9 @@ var castMap = map[oid.Oid]map[oid.Oid]cast{ }, oid.T_varbit: { oid.T_bit: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, + oid.T_int2: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int4: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, + oid.T_int8: {maxContext: CastContextExplicit, origin: contextOriginLegacyConversion, volatility: VolatilityImmutable}, oid.T_varbit: {maxContext: CastContextImplicit, origin: contextOriginPgCast, volatility: VolatilityImmutable}, // Automatic I/O conversions to string types. oid.T_bpchar: {maxContext: CastContextAssignment, origin: contextOriginAutomaticIOConversion, volatility: VolatilityImmutable}, @@ -1314,9 +1341,23 @@ func lookupCast(src, tgt *types.T, intervalStyleEnabled, dateStyleEnabled bool) }, true } - // Casts from string types to array types are stable and allowed in - // explicit contexts. - if srcFamily == types.StringFamily && tgtFamily == types.ArrayFamily { + // Casts from array and tuple types to string types are immutable and + // allowed in assignment contexts. + // TODO(mgartner): Tuple to string casts should be stable. They are + // immutable to avoid backward incompatibility with previous versions, but + // this is incorrect and can causes corrupt indexes, corrupt tables, and + // incorrect query results. + if srcFamily == types.TupleFamily && tgtFamily == types.StringFamily { + return cast{ + maxContext: CastContextAssignment, + volatility: VolatilityImmutable, + }, true + } + + // Casts from string types to array and tuple types are stable and allowed + // in explicit contexts. + if srcFamily == types.StringFamily && + (tgtFamily == types.ArrayFamily || tgtFamily == types.TupleFamily) { return cast{ maxContext: CastContextExplicit, volatility: VolatilityStable, @@ -1334,8 +1375,8 @@ func lookupCast(src, tgt *types.T, intervalStyleEnabled, dateStyleEnabled bool) } // If src and tgt are the same type, the immutable cast is valid in any - // context. This logic is intentially after the lookup into castMap so that - // entries in castMap are preferred. + // context. This logic is intentionally after the lookup into castMap so + // that entries in castMap are preferred. if src.Oid() == tgt.Oid() { return cast{ maxContext: CastContextImplicit, @@ -1346,423 +1387,6 @@ func lookupCast(src, tgt *types.T, intervalStyleEnabled, dateStyleEnabled bool) return cast{}, false } -type castInfo struct { - from types.Family - to types.Family - volatility Volatility - - // volatilityHint is an optional string for VolatilityStable casts. When set, - // it is used as an error hint suggesting a possible workaround when stable - // casts are not allowed. - volatilityHint string - - // If set, the volatility of this cast is not cross-checked against postgres. - // Use this with caution. - ignoreVolatilityCheck bool -} - -// validCasts lists all valid explicit casts. -// -// This list must be kept in sync with the capabilities of PerformCast. -// -// Each cast defines a volatility: -// -// - immutable casts yield the same result on the same arguments in whatever -// context they are evaluated. 
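As an aside to the lookupCast hunk above (this sketch is illustrative and not part of the patch): the precedence the function applies is easier to see outside the diff. The snippet below is a simplified, self-contained model using toy string keys instead of the real `cast`/`castMap`/`oid.Oid` definitions; it shows the order of checks — an explicit castMap entry first, then the family-level fallbacks added above (tuple→string as an assignment-context immutable cast, string→array/tuple as an explicit-context stable cast), and finally the identity cast when source and target match.
```
package main

import "fmt"

// Toy stand-ins for the real cast/castMap definitions; the field names mirror
// cast.go, but everything here is a simplified illustration, not the actual code.
type castRule struct {
	maxContext string // "explicit" < "assignment" < "implicit"
	volatility string // "immutable" or "stable"
}

// Two-level map keyed by (source, target) type name, mirroring the shape of
// castMap (map[oid.Oid]map[oid.Oid]cast).
var toyCastMap = map[string]map[string]castRule{
	"timestamp": {"int8": {maxContext: "explicit", volatility: "immutable"}},
}

// lookupRule mirrors the precedence in lookupCast: explicit castMap entry
// first, then the family-level fallbacks added in the hunk above, then the
// identity cast for matching types.
func lookupRule(src, tgt, srcFamily, tgtFamily string) (castRule, bool) {
	if byTgt, ok := toyCastMap[src]; ok {
		if r, ok := byTgt[tgt]; ok {
			return r, true
		}
	}
	if srcFamily == "tuple" && tgtFamily == "string" {
		return castRule{maxContext: "assignment", volatility: "immutable"}, true
	}
	if srcFamily == "string" && (tgtFamily == "array" || tgtFamily == "tuple") {
		return castRule{maxContext: "explicit", volatility: "stable"}, true
	}
	if src == tgt {
		return castRule{maxContext: "implicit", volatility: "immutable"}, true
	}
	return castRule{}, false
}

func main() {
	fmt.Println(lookupRule("timestamp", "int8", "timestamp", "int"))
	fmt.Println(lookupRule("record", "text", "tuple", "string"))
}
```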
-// -// - stable casts can yield a different result depending on the evaluation context: -// - session settings (e.g. bytes encoding format) -// - current timezone -// - current time (e.g. 'now'::string). -// -// TODO(#55094): move the PerformCast code for each cast into functions defined -// within each cast. -// -var validCasts = []castInfo{ - // Casts to BitFamily. - {from: types.UnknownFamily, to: types.BitFamily, volatility: VolatilityImmutable}, - {from: types.BitFamily, to: types.BitFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.BitFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.BitFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.BitFamily, volatility: VolatilityImmutable}, - - // Casts to BoolFamily. - {from: types.UnknownFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.DecimalFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.BoolFamily, volatility: VolatilityImmutable}, - - // Casts to IntFamily. - {from: types.UnknownFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.DecimalFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.DateFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.IntervalFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.OidFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.BitFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.IntFamily, volatility: VolatilityImmutable}, - - // Casts to FloatFamily. 
- {from: types.UnknownFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.DecimalFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.DateFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.IntervalFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.FloatFamily, volatility: VolatilityImmutable}, - - // Casts to Box2D Family. - {from: types.UnknownFamily, to: types.Box2DFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.Box2DFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.Box2DFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.Box2DFamily, volatility: VolatilityImmutable}, - {from: types.Box2DFamily, to: types.Box2DFamily, volatility: VolatilityImmutable}, - - // Casts to GeographyFamily. - {from: types.UnknownFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.GeographyFamily, volatility: VolatilityImmutable}, - - // Casts to GeometryFamily. - {from: types.UnknownFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.Box2DFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.GeometryFamily, volatility: VolatilityImmutable}, - - // Casts to DecimalFamily. 
- {from: types.UnknownFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.DecimalFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.DateFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.IntervalFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.DecimalFamily, volatility: VolatilityImmutable}, - - // Casts to StringFamily. - {from: types.UnknownFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.StringFamily, volatility: VolatilityStable}, - {from: types.DecimalFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.BitFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.ArrayFamily, to: types.StringFamily, volatility: VolatilityStable}, - {from: types.TupleFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.Box2DFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.StringFamily, volatility: VolatilityStable}, - { - from: types.TimestampFamily, - to: types.StringFamily, - volatility: VolatilityImmutable, - volatilityHint: "TIMESTAMP to STRING casts are dependent on DateStyle; consider " + - "using to_char(timestamp) instead.", - }, - { - from: types.TimestampTZFamily, - to: types.StringFamily, - volatility: VolatilityStable, - volatilityHint: "TIMESTAMPTZ to STRING casts depend on the current timezone; consider " + - "using to_char(t AT TIME ZONE 'UTC') instead.", - }, - { - from: types.IntervalFamily, - to: types.StringFamily, - volatility: VolatilityImmutable, - volatilityHint: "INTERVAL to STRING casts depend on IntervalStyle; consider using to_char(interval)", - }, - {from: types.UuidFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - { - from: types.DateFamily, - to: types.StringFamily, - volatility: VolatilityImmutable, - volatilityHint: "DATE to STRING casts are dependent on DateStyle; consider " + - "using to_char(date) instead.", - }, - {from: types.TimeFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.TimeTZFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.OidFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: 
types.INetFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.EnumFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - {from: types.VoidFamily, to: types.StringFamily, volatility: VolatilityImmutable}, - - // Casts to CollatedStringFamily. - {from: types.UnknownFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.BoolFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.CollatedStringFamily, volatility: VolatilityStable}, - {from: types.DecimalFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.BitFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.ArrayFamily, to: types.CollatedStringFamily, volatility: VolatilityStable}, - {from: types.TupleFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.Box2DFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.CollatedStringFamily, volatility: VolatilityStable}, - {from: types.TimestampFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.CollatedStringFamily, volatility: VolatilityStable}, - {from: types.IntervalFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.UuidFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.DateFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.TimeFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.TimeTZFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.OidFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.INetFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - {from: types.EnumFamily, to: types.CollatedStringFamily, volatility: VolatilityImmutable}, - - // Casts to BytesFamily. - {from: types.UnknownFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.UuidFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.BytesFamily, volatility: VolatilityImmutable}, - - // Casts to DateFamily. 
- {from: types.UnknownFamily, to: types.DateFamily, volatility: VolatilityImmutable}, - { - from: types.StringFamily, - to: types.DateFamily, - volatility: VolatilityStable, - volatilityHint: "STRING to DATE casts depend on session DateStyle; use parse_date(string) instead", - }, - {from: types.CollatedStringFamily, to: types.DateFamily, volatility: VolatilityStable}, - {from: types.DateFamily, to: types.DateFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.DateFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.DateFamily, volatility: VolatilityStable}, - {from: types.IntFamily, to: types.DateFamily, volatility: VolatilityImmutable}, - - // Casts to TimeFamily. - {from: types.UnknownFamily, to: types.TimeFamily, volatility: VolatilityImmutable}, - { - from: types.StringFamily, - to: types.TimeFamily, - volatility: VolatilityStable, - volatilityHint: "STRING to TIME casts depend on session DateStyle; use parse_time(string) instead", - }, - {from: types.CollatedStringFamily, to: types.TimeFamily, volatility: VolatilityStable}, - {from: types.TimeFamily, to: types.TimeFamily, volatility: VolatilityImmutable}, - {from: types.TimeTZFamily, to: types.TimeFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.TimeFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.TimeFamily, volatility: VolatilityStable}, - {from: types.IntervalFamily, to: types.TimeFamily, volatility: VolatilityImmutable}, - - // Casts to TimeTZFamily. - {from: types.UnknownFamily, to: types.TimeTZFamily, volatility: VolatilityImmutable}, - { - from: types.StringFamily, - to: types.TimeTZFamily, - volatility: VolatilityStable, - volatilityHint: "STRING to TIMETZ casts depend on session DateStyle; use parse_timetz(string) instead", - }, - {from: types.CollatedStringFamily, to: types.TimeTZFamily, volatility: VolatilityStable}, - {from: types.TimeFamily, to: types.TimeTZFamily, volatility: VolatilityStable}, - {from: types.TimeTZFamily, to: types.TimeTZFamily, volatility: VolatilityImmutable}, - {from: types.TimestampTZFamily, to: types.TimeTZFamily, volatility: VolatilityStable}, - - // Casts to TimestampFamily. - {from: types.UnknownFamily, to: types.TimestampFamily, volatility: VolatilityImmutable}, - { - from: types.StringFamily, to: types.TimestampFamily, volatility: VolatilityStable, - volatilityHint: "STRING to TIMESTAMP casts are context-dependent because of relative timestamp strings " + - "like 'now' and session settings such as DateStyle; use parse_timestamp(string) instead.", - }, - {from: types.CollatedStringFamily, to: types.TimestampFamily, volatility: VolatilityStable}, - {from: types.DateFamily, to: types.TimestampFamily, volatility: VolatilityImmutable}, - {from: types.TimestampFamily, to: types.TimestampFamily, volatility: VolatilityImmutable}, - { - from: types.TimestampTZFamily, to: types.TimestampFamily, volatility: VolatilityStable, - volatilityHint: "TIMESTAMPTZ to TIMESTAMP casts depend on the current timezone; consider using AT TIME ZONE 'UTC' instead", - }, - {from: types.IntFamily, to: types.TimestampFamily, volatility: VolatilityImmutable}, - - // Casts to TimestampTZFamily. 
- {from: types.UnknownFamily, to: types.TimestampTZFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.TimestampTZFamily, volatility: VolatilityStable}, - {from: types.CollatedStringFamily, to: types.TimestampTZFamily, volatility: VolatilityStable}, - {from: types.DateFamily, to: types.TimestampTZFamily, volatility: VolatilityStable}, - {from: types.TimestampFamily, to: types.TimestampTZFamily, volatility: VolatilityStable}, - {from: types.TimestampTZFamily, to: types.TimestampTZFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.TimestampTZFamily, volatility: VolatilityImmutable}, - - // Casts to IntervalFamily. - {from: types.UnknownFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - { - from: types.StringFamily, - to: types.IntervalFamily, - volatility: VolatilityImmutable, - volatilityHint: "STRING to INTERVAL casts depend on session IntervalStyle; use parse_interval(string) instead", - }, - {from: types.CollatedStringFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - {from: types.IntFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - {from: types.TimeFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - {from: types.IntervalFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - {from: types.FloatFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - {from: types.DecimalFamily, to: types.IntervalFamily, volatility: VolatilityImmutable}, - - // Casts to OidFamily. - {from: types.UnknownFamily, to: types.OidFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.OidFamily, volatility: VolatilityStable}, - {from: types.CollatedStringFamily, to: types.OidFamily, volatility: VolatilityStable}, - {from: types.IntFamily, to: types.OidFamily, volatility: VolatilityStable, ignoreVolatilityCheck: true}, - {from: types.OidFamily, to: types.OidFamily, volatility: VolatilityStable}, - - // Casts to UnknownFamily. - {from: types.UnknownFamily, to: types.UnknownFamily, volatility: VolatilityImmutable}, - - // Casts to UuidFamily. - {from: types.UnknownFamily, to: types.UuidFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.UuidFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.UuidFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.UuidFamily, volatility: VolatilityImmutable}, - {from: types.UuidFamily, to: types.UuidFamily, volatility: VolatilityImmutable}, - - // Casts to INetFamily. - {from: types.UnknownFamily, to: types.INetFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.INetFamily, volatility: VolatilityImmutable}, - {from: types.CollatedStringFamily, to: types.INetFamily, volatility: VolatilityImmutable}, - {from: types.INetFamily, to: types.INetFamily, volatility: VolatilityImmutable}, - - // Casts to ArrayFamily. - {from: types.UnknownFamily, to: types.ArrayFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.ArrayFamily, volatility: VolatilityStable}, - - // Casts to JsonFamily. 
- {from: types.UnknownFamily, to: types.JsonFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.JsonFamily, volatility: VolatilityImmutable}, - {from: types.JsonFamily, to: types.JsonFamily, volatility: VolatilityImmutable}, - {from: types.GeometryFamily, to: types.JsonFamily, volatility: VolatilityImmutable}, - {from: types.GeographyFamily, to: types.JsonFamily, volatility: VolatilityImmutable}, - - // Casts to EnumFamily. - {from: types.UnknownFamily, to: types.EnumFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.EnumFamily, volatility: VolatilityImmutable}, - {from: types.EnumFamily, to: types.EnumFamily, volatility: VolatilityImmutable}, - {from: types.BytesFamily, to: types.EnumFamily, volatility: VolatilityImmutable}, - - // Casts to TupleFamily. - {from: types.UnknownFamily, to: types.TupleFamily, volatility: VolatilityImmutable}, - {from: types.TupleFamily, to: types.TupleFamily, volatility: VolatilityStable}, - {from: types.StringFamily, to: types.TupleFamily, volatility: VolatilityStable}, - - // Casts to VoidFamily. - {from: types.UnknownFamily, to: types.VoidFamily, volatility: VolatilityImmutable}, - {from: types.StringFamily, to: types.VoidFamily, volatility: VolatilityImmutable}, -} - -type castsMapKey struct { - from, to types.Family -} - -var castsMap map[castsMapKey]*castInfo - -// styleCastsMap contains castInfos for casts affected by a style parameter. -var styleCastsMap map[castsMapKey]*castInfo - -func init() { - castsMap = make(map[castsMapKey]*castInfo, len(validCasts)) - styleCastsMap = make(map[castsMapKey]*castInfo) - for i := range validCasts { - c := &validCasts[i] - - key := castsMapKey{from: c.from, to: c.to} - castsMap[key] = c - - if isDateStyleCastAffected(c.from, c.to) || isIntervalStyleCastAffected(c.from, c.to) { - cCopy := *c - cCopy.volatility = VolatilityStable - styleCastsMap[key] = &cCopy - } - } -} - -func isIntervalStyleCastAffected(from, to types.Family) bool { - switch from { - case types.StringFamily, types.CollatedStringFamily: - switch to { - case types.IntervalFamily: - return true - } - case types.IntervalFamily: - switch to { - case types.StringFamily, types.CollatedStringFamily: - return true - } - } - return false -} - -func isDateStyleCastAffected(from, to types.Family) bool { - switch from { - case types.StringFamily, types.CollatedStringFamily: - switch to { - case types.TimeFamily, - types.TimeTZFamily, - types.DateFamily, - types.TimestampFamily: - return true - } - case types.DateFamily, - types.TimestampFamily: - switch to { - case types.StringFamily, types.CollatedStringFamily: - return true - } - } - return false -} - -// lookupCastInfo returns the information for a valid cast. -// Returns nil if this is not a valid cast. -// Does not handle array and tuple casts. -func lookupCastInfo( - from, to types.Family, intervalStyleEnabled bool, dateStyleEnabled bool, -) *castInfo { - k := castsMapKey{from: from, to: to} - if (intervalStyleEnabled && isIntervalStyleCastAffected(from, to)) || - (dateStyleEnabled && isDateStyleCastAffected(from, to)) { - if r, ok := styleCastsMap[k]; ok { - return r - } - } - return castsMap[k] -} - // LookupCastVolatility returns the volatility of a valid cast. 
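Before moving on, a recap of what the block deleted above did, since it bundles the old family-keyed lookup with the DateStyle/IntervalStyle handling. The toy sketch below (made-up string keys, not the real types.Family or Volatility values) captures the mechanism: style-affected casts were copied into a second map with volatility forced to stable, and the session flags decided which map answered a lookup. The new code path resolves everything through the single OID-keyed castMap via lookupCast, as the LookupCastVolatility hunk that follows shows.
```
package main

import "fmt"

// A toy recap of the deleted styleCastsMap mechanism, using plain strings in
// place of types.Family and Volatility. Not the real code.
type castKey struct{ from, to string }

var baseCasts = map[castKey]string{
	{"string", "interval"}: "immutable",
	{"string", "date"}:     "immutable",
}

// styleCasts holds copies of the style-affected casts with volatility forced
// to stable, mirroring the removed init() loop. Here every base cast is
// treated as style-affected for brevity; the real code consulted
// isDateStyleCastAffected / isIntervalStyleCastAffected.
var styleCasts = map[castKey]string{}

func init() {
	for k := range baseCasts {
		styleCasts[k] = "stable"
	}
}

func lookupVolatility(from, to string, styleEnabled bool) (string, bool) {
	k := castKey{from, to}
	if styleEnabled {
		if v, ok := styleCasts[k]; ok {
			return v, true
		}
	}
	v, ok := baseCasts[k]
	return v, ok
}

func main() {
	fmt.Println(lookupVolatility("string", "interval", false)) // immutable true
	fmt.Println(lookupVolatility("string", "interval", true))  // stable true
}
```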
func LookupCastVolatility(from, to *types.T, sd *sessiondata.SessionData) (_ Volatility, ok bool) { fromFamily := from.Family() @@ -1802,20 +1426,8 @@ func LookupCastVolatility(from, to *types.T, sd *sessiondata.SessionData) (_ Vol dateStyleEnabled = sd.DateStyleEnabled } - // If the volatility has been set in castMap, return it. - c, ok := lookupCast(from, to, intervalStyleEnabled, dateStyleEnabled) - if ok && c.volatility != volatilityTODO { - return c.volatility, true - } - - // Otherwise, fallback to the volatility in castInfo. - cast := lookupCastInfo( - fromFamily, - toFamily, - sd != nil && sd.IntervalStyleEnabled, - sd != nil && sd.DateStyleEnabled, - ) - if cast == nil { + cast, ok := lookupCast(from, to, intervalStyleEnabled, dateStyleEnabled) + if !ok { return 0, false } return cast.volatility, true @@ -2314,14 +1926,14 @@ func performCastWithoutPrecisionTruncation( val.SetInt64(v.Unix()) val.Mul(val, big10E6) micros := v.Nanosecond() / int(time.Microsecond) - val.Add(val, big.NewInt(int64(micros))) + val.Add(val, apd.NewBigInt(int64(micros))) dd.Exponent = -6 case *DTimestampTZ: val := &dd.Coeff val.SetInt64(v.Unix()) val.Mul(val, big10E6) micros := v.Nanosecond() / int(time.Microsecond) - val.Add(val, big.NewInt(int64(micros))) + val.Add(val, apd.NewBigInt(int64(micros))) dd.Exponent = -6 case *DInterval: v.AsBigInt(&dd.Coeff) @@ -2756,12 +2368,12 @@ func performCastWithoutPrecisionTruncation( case *DTime: return NewDInterval(duration.MakeDuration(int64(*v)*1000, 0, 0), itm), nil case *DDecimal: - d := ctx.getTmpDec() + var d apd.Decimal dnanos := v.Decimal dnanos.Exponent += 9 // We need HighPrecisionCtx because duration values can contain // upward of 35 decimal digits and DecimalCtx only provides 25. - _, err := HighPrecisionCtx.Quantize(d, &dnanos, 0) + _, err := HighPrecisionCtx.Quantize(&d, &dnanos, 0) if err != nil { return nil, err } @@ -2922,8 +2534,8 @@ func performIntToOidCast(ctx *EvalContext, t *types.T, v DInt) (Datum, error) { } func roundDecimalToInt(ctx *EvalContext, d *apd.Decimal) (int64, error) { - newD := ctx.getTmpDec() - if _, err := DecimalCtx.RoundToIntegralValue(newD, d); err != nil { + var newD apd.Decimal + if _, err := DecimalCtx.RoundToIntegralValue(&newD, d); err != nil { return 0, err } i, err := newD.Int64() diff --git a/pkg/sql/sem/tree/cast_test.go b/pkg/sql/sem/tree/cast_test.go index 0e38ffcf2a28..4340e06fd5e4 100644 --- a/pkg/sql/sem/tree/cast_test.go +++ b/pkg/sql/sem/tree/cast_test.go @@ -23,22 +23,53 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" "github.com/lib/pq/oid" "github.com/stretchr/testify/require" ) -// TestCastsVolatilityMatchesPostgres checks that our defined casts match -// Postgres' casts for Volatility. +// TestCastsMatchPostgres checks that the Volatility and CastContext of our +// defined casts match Postgres' casts. // -// Dump command below: -// COPY ( -// SELECT c.castsource, c.casttarget, p.provolatile, p.proleakproof -// FROM pg_cast c JOIN pg_proc p ON (c.castfunc = p.oid) -// ) TO STDOUT WITH CSV DELIMITER '|' HEADER; -func TestCastsVolatilityMatchesPostgres(t *testing.T) { +// The command for generating pg_cast_dump.csv from psql is below. We ignore +// types that we do not support, and we ignore geospatial types because they are +// an extension of Postgres and have no official OIDs. 
+// +// \copy ( +// WITH ignored_types AS ( +// SELECT t::regtype::oid t +// FROM (VALUES +// ('geography'), +// ('geometry'), +// ('box2d'), +// ('box3d'), +// ('tstzmultirange'), +// ('int4multirange'), +// ('int8multirange'), +// ('tstzmultirange'), +// ('tsmultirange'), +// ('datemultirange'), +// ('nummultirange') +// ) AS types(t) +// ) +// SELECT +// c.castsource, +// c.casttarget, +// p.provolatile, +// p.proleakproof, +// c.castcontext, +// substring(version(), 'PostgreSQL (\d+\.\d+)') pg_version +// FROM pg_cast c JOIN pg_proc p ON (c.castfunc = p.oid) +// WHERE +// c.castsource NOT IN (SELECT t FROM ignored_types) +// AND c.casttarget NOT IN (SELECT t FROM ignored_types) +// ORDER BY 1, 2 +// ) TO pg_cast_dump.csv WITH CSV DELIMITER '|' HEADER; +// +func TestCastsMatchPostgres(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - csvPath := filepath.Join("testdata", "pg_cast_provolatile_dump.csv") + csvPath := filepath.Join("testdata", "pg_cast_dump.csv") f, err := os.Open(csvPath) require.NoError(t, err) @@ -51,11 +82,16 @@ func TestCastsVolatilityMatchesPostgres(t *testing.T) { _, err = reader.Read() require.NoError(t, err) - type pgCast struct { - from, to oid.Oid + type pgCastKey struct { + from, to oid.Oid + } + + type pgCastValue struct { volatility Volatility + context CastContext } - var pgCasts []pgCast + + pgCastMap := make(map[pgCastKey]pgCastValue) for { line, err := reader.Read() @@ -63,7 +99,7 @@ func TestCastsVolatilityMatchesPostgres(t *testing.T) { break } require.NoError(t, err) - require.Len(t, line, 4) + require.Len(t, line, 6) fromOid, err := strconv.Atoi(line[0]) require.NoError(t, err) @@ -75,162 +111,64 @@ func TestCastsVolatilityMatchesPostgres(t *testing.T) { require.Len(t, provolatile, 1) proleakproof := line[3] require.Len(t, proleakproof, 1) + castcontext := line[4] + require.Len(t, castcontext, 1) v, err := VolatilityFromPostgres(provolatile, proleakproof[0] == 't') require.NoError(t, err) - pgCasts = append(pgCasts, pgCast{ - from: oid.Oid(fromOid), - to: oid.Oid(toOid), - volatility: v, - }) - } - - oidToFamily := func(o oid.Oid) (_ types.Family, ok bool) { - t, ok := types.OidToType[o] - if !ok { - return 0, false - } - return t.Family(), true - } - - oidStr := func(o oid.Oid) string { - res, ok := oidext.TypeName(o) - if !ok { - res = fmt.Sprintf("%d", o) - } - return res - } - - for _, c := range validCasts { - if c.volatility == 0 { - t.Errorf("cast %s::%s has no volatility set", c.from.Name(), c.to.Name()) - - } - if c.ignoreVolatilityCheck { - continue - } + c, err := castContextFromPostgres(castcontext) + require.NoError(t, err) - // Look through all pg casts and find any where the Oids map to these - // families. 
- found := false - for i := range pgCasts { - fromFamily, fromOk := oidToFamily(pgCasts[i].from) - toFamily, toOk := oidToFamily(pgCasts[i].to) - if fromOk && toOk && fromFamily == c.from && toFamily == c.to { - found = true - if c.volatility != pgCasts[i].volatility { - t.Errorf("cast %s::%s has volatility %s; corresponding pg cast %s::%s has volatility %s", - c.from.Name(), c.to.Name(), c.volatility, - oidStr(pgCasts[i].from), oidStr(pgCasts[i].to), pgCasts[i].volatility, - ) - } - } - } - if !found && testing.Verbose() { - t.Logf("cast %s::%s has no corresponding pg cast", c.from.Name(), c.to.Name()) - } + pgCastMap[pgCastKey{oid.Oid(fromOid), oid.Oid(toOid)}] = pgCastValue{v, c} } for src := range castMap { for tgt, c := range castMap[src] { - if c.volatility == volatilityTODO { - continue - } - // Find the corresponding pg cast. - found := false - for _, pgCast := range pgCasts { - if src == pgCast.from && tgt == pgCast.to { - found = true - if c.volatility != pgCast.volatility { - t.Errorf("cast %s::%s has volatility %s; corresponding pg cast has volatility %s", - oidStr(src), oidStr(tgt), c.volatility, pgCast.volatility, - ) - - } - } - } - if !found && testing.Verbose() { + pgCast, ok := pgCastMap[pgCastKey{src, tgt}] + if !ok && testing.Verbose() { t.Logf("cast %s::%s has no corresponding pg cast", oidStr(src), oidStr(tgt)) } - } - } -} - -func TestCastMapIncludesValidCasts(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - // familyToType returns the first type found of the given family. - familyToTypes := func(f types.Family) []*types.T { - var typs []*types.T - for _, typ := range types.OidToType { - if f == typ.Family() { - typs = append(typs, typ) + if ok && c.volatility != pgCast.volatility { + t.Errorf("cast %s::%s has volatility %s; corresponding pg cast has volatility %s", + oidStr(src), oidStr(tgt), c.volatility, pgCast.volatility, + ) } - } - return typs - } - - // findCast returns the first cast found from a type in srcTypes to a type - // in tgtTypes. - findCast := func(srcTypes, tgtTypes []*types.T) (_ cast, ok bool) { - for _, src := range srcTypes { - for _, tgt := range tgtTypes { - c, ok := lookupCast( - src, - tgt, - false, /* intervalStyleEnabled */ - false, /* dateStyleEnabled */ + if ok && c.maxContext != pgCast.context { + t.Errorf("cast %s::%s has maxContext %s; corresponding pg cast has context %s", + oidStr(src), oidStr(tgt), c.maxContext, pgCast.context, ) - if ok { - return c, true - } } } - return cast{}, false } +} - // Validate that there is at least one cast in castMap for each cast in - // validCasts. - for _, c := range validCasts { - srcTypes := familyToTypes(c.from) - if len(srcTypes) == 0 { - continue - } - - tgtTypes := familyToTypes(c.to) - if len(tgtTypes) == 0 { - continue - } - - _, ok := findCast(srcTypes, tgtTypes) - if !ok { - t.Errorf( - "castMap should include at least one cast from family %s to family %s", - c.from.Name(), c.to.Name(), - ) - } +// castContextFromPostgres returns a CastContext that matches the castcontext +// setting in Postgres's pg_cast table. +func castContextFromPostgres(castcontext string) (CastContext, error) { + switch castcontext { + case "e": + return CastContextExplicit, nil + case "a": + return CastContextAssignment, nil + case "i": + return CastContextImplicit, nil + default: + return 0, errors.AssertionFailedf("invalid castcontext %s", castcontext) } } // TestCastsFromUnknown verifies that there is a cast from Unknown defined for -// all type families. +// all types. 
func TestCastsFromUnknown(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - for v := range types.Family_name { - switch fam := types.Family(v); fam { - case types.UnknownFamily, types.AnyFamily: - // These type families are exceptions. - - default: - cast := lookupCastInfo(types.UnknownFamily, fam, false /* intervalStyleEnabled */, false /* dateStyleEnabled */) - if cast == nil { - t.Errorf("cast from Unknown to %s does not exist", fam) - } + for _, typ := range types.OidToType { + _, ok := lookupCast(types.Unknown, typ, false /* intervalStyleEnabled */, false /* dateStyleEnabled */) + if !ok { + t.Errorf("cast from Unknown to %s does not exist", typ.String()) } } } @@ -290,3 +228,11 @@ func TestTupleCastVolatility(t *testing.T) { } } } + +func oidStr(o oid.Oid) string { + res, ok := oidext.TypeName(o) + if !ok { + res = fmt.Sprintf("%d", o) + } + return res +} diff --git a/pkg/sql/sem/tree/constant_test.go b/pkg/sql/sem/tree/constant_test.go index e1f9e4845fc5..bfef77c21919 100644 --- a/pkg/sql/sem/tree/constant_test.go +++ b/pkg/sql/sem/tree/constant_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" diff --git a/pkg/sql/sem/tree/datum.go b/pkg/sql/sem/tree/datum.go index 123d8c6840ae..8cbb484c991d 100644 --- a/pkg/sql/sem/tree/datum.go +++ b/pkg/sql/sem/tree/datum.go @@ -15,7 +15,6 @@ import ( "encoding/hex" "fmt" "math" - "math/big" "regexp" "sort" "strconv" @@ -24,7 +23,7 @@ import ( "unicode" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/lex" @@ -1058,10 +1057,10 @@ func (d *DDecimal) CompareError(ctx *EvalContext, other Datum) (int, error) { // NULL is less than any non-NULL value. return 1, nil } - v := ctx.getTmpDec() + var v apd.Decimal switch t := UnwrapDatum(ctx, other).(type) { case *DDecimal: - v = &t.Decimal + v.Set(&t.Decimal) case *DInt: v.SetInt64(int64(*t)) case *DFloat: @@ -1071,7 +1070,7 @@ func (d *DDecimal) CompareError(ctx *EvalContext, other Datum) (int, error) { default: return 0, makeUnsupportedComparisonMessage(d, other) } - res := CompareDecimals(&d.Decimal, v) + res := CompareDecimals(&d.Decimal, &v) return res, nil } @@ -1149,23 +1148,14 @@ func (d *DDecimal) Format(ctx *FmtCtx) { } } -// shallowDecimalSize is the size of the fixed-size part of apd.Decimal in -// bytes. -const shallowDecimalSize = unsafe.Sizeof(apd.Decimal{}) - -// SizeOfDecimal returns the size in bytes of an apd.Decimal. -func SizeOfDecimal(d *apd.Decimal) uintptr { - return shallowDecimalSize + uintptr(cap(d.Coeff.Bits()))*unsafe.Sizeof(big.Word(0)) -} - // Size implements the Datum interface. func (d *DDecimal) Size() uintptr { - return SizeOfDecimal(&d.Decimal) + return d.Decimal.Size() } var ( decimalNegativeZero = &apd.Decimal{Negative: true} - bigTen = big.NewInt(10) + bigTen = apd.NewBigInt(10) ) // IsComposite implements the CompositeDatum interface. @@ -1176,7 +1166,7 @@ func (d *DDecimal) IsComposite() bool { } // Check if d is divisible by 10. 
- var r big.Int + var r apd.BigInt r.Rem(&d.Decimal.Coeff, bigTen) return r.Sign() == 0 } @@ -3672,6 +3662,11 @@ func NewDTupleWithLen(typ *types.T, l int) *DTuple { return &DTuple{D: make(Datums, l), typ: typ} } +// MakeDTuple creates a DTuple with the provided datums. See NewDTuple. +func MakeDTuple(typ *types.T, d ...Datum) DTuple { + return DTuple{D: d, typ: typ} +} + // AsDTuple attempts to retrieve a *DTuple from an Expr, returning a *DTuple and // a flag signifying whether the assertion was successful. The function should // be used instead of direct type assertions wherever a *DTuple wrapped by a diff --git a/pkg/sql/rowenc/datum_alloc.go b/pkg/sql/sem/tree/datum_alloc.go similarity index 67% rename from pkg/sql/rowenc/datum_alloc.go rename to pkg/sql/sem/tree/datum_alloc.go index be271d1b61a4..6027b9677dfd 100644 --- a/pkg/sql/rowenc/datum_alloc.go +++ b/pkg/sql/sem/tree/datum_alloc.go @@ -8,11 +8,10 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package rowenc +package tree import ( "github.com/cockroachdb/cockroach/pkg/geo/geopb" - "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" ) @@ -28,31 +27,30 @@ type DatumAlloc struct { // it will be set to defaultDatumAllocSize automatically. AllocSize int - datumAlloc []tree.Datum - dintAlloc []tree.DInt - dfloatAlloc []tree.DFloat - dstringAlloc []tree.DString - dbytesAlloc []tree.DBytes - dbitArrayAlloc []tree.DBitArray - ddecimalAlloc []tree.DDecimal - ddateAlloc []tree.DDate - denumAlloc []tree.DEnum - dbox2dAlloc []tree.DBox2D - dgeometryAlloc []tree.DGeometry - dgeographyAlloc []tree.DGeography - dtimeAlloc []tree.DTime - dtimetzAlloc []tree.DTimeTZ - dtimestampAlloc []tree.DTimestamp - dtimestampTzAlloc []tree.DTimestampTZ - dintervalAlloc []tree.DInterval - duuidAlloc []tree.DUuid - dipnetAlloc []tree.DIPAddr - djsonAlloc []tree.DJSON - dtupleAlloc []tree.DTuple - doidAlloc []tree.DOid - dvoidAlloc []tree.DVoid - scratch []byte - env tree.CollationEnvironment + datumAlloc []Datum + dintAlloc []DInt + dfloatAlloc []DFloat + dstringAlloc []DString + dbytesAlloc []DBytes + dbitArrayAlloc []DBitArray + ddecimalAlloc []DDecimal + ddateAlloc []DDate + denumAlloc []DEnum + dbox2dAlloc []DBox2D + dgeometryAlloc []DGeometry + dgeographyAlloc []DGeography + dtimeAlloc []DTime + dtimetzAlloc []DTimeTZ + dtimestampAlloc []DTimestamp + dtimestampTzAlloc []DTimestampTZ + dintervalAlloc []DInterval + duuidAlloc []DUuid + dipnetAlloc []DIPAddr + djsonAlloc []DJSON + dtupleAlloc []DTuple + doidAlloc []DOid + dvoidAlloc []DVoid + env CollationEnvironment // Allocations for geopb.SpatialObject.EWKB ewkbAlloc []byte @@ -66,7 +64,7 @@ const defaultEWKBAllocSize = 4096 // Arbitrary, could be tuned. const maxEWKBAllocSize = 16384 // Arbitrary, could be tuned. // NewDatums allocates Datums of the specified size. -func (a *DatumAlloc) NewDatums(num int) tree.Datums { +func (a *DatumAlloc) NewDatums(num int) Datums { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } @@ -76,7 +74,7 @@ func (a *DatumAlloc) NewDatums(num int) tree.Datums { if extTupleLen := num * datumAllocMultiplier; extensionSize < extTupleLen { extensionSize = extTupleLen } - *buf = make(tree.Datums, extensionSize) + *buf = make(Datums, extensionSize) } r := (*buf)[:num] *buf = (*buf)[num:] @@ -84,13 +82,13 @@ func (a *DatumAlloc) NewDatums(num int) tree.Datums { } // NewDInt allocates a DInt. 
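A quick usage note on the relocated allocator (an illustrative snippet, not part of the patch; it assumes the cockroach repository is available on the module path): after the move, callers import DatumAlloc from sem/tree directly, and each New* call hands out one element from a chunk of AllocSize datums, so decoding many values avoids a heap allocation per datum.
```
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

func main() {
	// DatumAlloc now lives in sem/tree, so no rowenc import is needed.
	var a tree.DatumAlloc
	a.AllocSize = 64 // optional; a zero value falls back to defaultDatumAllocSize

	// Each call takes one slot out of a pre-allocated chunk of datums.
	d1 := a.NewDInt(tree.DInt(42))
	d2 := a.NewDString(tree.DString("hello"))
	fmt.Println(int64(*d1), string(*d2))
}
```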
-func (a *DatumAlloc) NewDInt(v tree.DInt) *tree.DInt { +func (a *DatumAlloc) NewDInt(v DInt) *DInt { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dintAlloc if len(*buf) == 0 { - *buf = make([]tree.DInt, a.AllocSize) + *buf = make([]DInt, a.AllocSize) } r := &(*buf)[0] *r = v @@ -99,13 +97,13 @@ func (a *DatumAlloc) NewDInt(v tree.DInt) *tree.DInt { } // NewDFloat allocates a DFloat. -func (a *DatumAlloc) NewDFloat(v tree.DFloat) *tree.DFloat { +func (a *DatumAlloc) NewDFloat(v DFloat) *DFloat { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dfloatAlloc if len(*buf) == 0 { - *buf = make([]tree.DFloat, a.AllocSize) + *buf = make([]DFloat, a.AllocSize) } r := &(*buf)[0] *r = v @@ -114,13 +112,13 @@ func (a *DatumAlloc) NewDFloat(v tree.DFloat) *tree.DFloat { } // NewDString allocates a DString. -func (a *DatumAlloc) NewDString(v tree.DString) *tree.DString { +func (a *DatumAlloc) NewDString(v DString) *DString { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dstringAlloc if len(*buf) == 0 { - *buf = make([]tree.DString, a.AllocSize) + *buf = make([]DString, a.AllocSize) } r := &(*buf)[0] *r = v @@ -128,19 +126,24 @@ func (a *DatumAlloc) NewDString(v tree.DString) *tree.DString { return r } +// NewDCollatedString allocates a DCollatedString. +func (a *DatumAlloc) NewDCollatedString(contents string, locale string) (*DCollatedString, error) { + return NewDCollatedString(contents, locale, &a.env) +} + // NewDName allocates a DName. -func (a *DatumAlloc) NewDName(v tree.DString) tree.Datum { - return tree.NewDNameFromDString(a.NewDString(v)) +func (a *DatumAlloc) NewDName(v DString) Datum { + return NewDNameFromDString(a.NewDString(v)) } // NewDBytes allocates a DBytes. -func (a *DatumAlloc) NewDBytes(v tree.DBytes) *tree.DBytes { +func (a *DatumAlloc) NewDBytes(v DBytes) *DBytes { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dbytesAlloc if len(*buf) == 0 { - *buf = make([]tree.DBytes, a.AllocSize) + *buf = make([]DBytes, a.AllocSize) } r := &(*buf)[0] *r = v @@ -149,13 +152,13 @@ func (a *DatumAlloc) NewDBytes(v tree.DBytes) *tree.DBytes { } // NewDBitArray allocates a DBitArray. -func (a *DatumAlloc) NewDBitArray(v tree.DBitArray) *tree.DBitArray { +func (a *DatumAlloc) NewDBitArray(v DBitArray) *DBitArray { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dbitArrayAlloc if len(*buf) == 0 { - *buf = make([]tree.DBitArray, a.AllocSize) + *buf = make([]DBitArray, a.AllocSize) } r := &(*buf)[0] *r = v @@ -164,28 +167,28 @@ func (a *DatumAlloc) NewDBitArray(v tree.DBitArray) *tree.DBitArray { } // NewDDecimal allocates a DDecimal. -func (a *DatumAlloc) NewDDecimal(v tree.DDecimal) *tree.DDecimal { +func (a *DatumAlloc) NewDDecimal(v DDecimal) *DDecimal { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.ddecimalAlloc if len(*buf) == 0 { - *buf = make([]tree.DDecimal, a.AllocSize) + *buf = make([]DDecimal, a.AllocSize) } r := &(*buf)[0] - *r = v + r.Set(&v.Decimal) *buf = (*buf)[1:] return r } // NewDDate allocates a DDate. -func (a *DatumAlloc) NewDDate(v tree.DDate) *tree.DDate { +func (a *DatumAlloc) NewDDate(v DDate) *DDate { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.ddateAlloc if len(*buf) == 0 { - *buf = make([]tree.DDate, a.AllocSize) + *buf = make([]DDate, a.AllocSize) } r := &(*buf)[0] *r = v @@ -194,13 +197,13 @@ func (a *DatumAlloc) NewDDate(v tree.DDate) *tree.DDate { } // NewDEnum allocates a DEnum. 
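One detail in the hunk above worth calling out: NewDDecimal now copies with `r.Set(&v.Decimal)` rather than `*r = v`. My reading (hedged, since the patch does not spell out the motivation) is that apd v3's BigInt coefficient can point into its own inline buffer, so a plain struct assignment may leave two decimals sharing or mis-referencing that storage, while Set performs a proper deep copy. A minimal standalone illustration with apd v3:
```
package main

import (
	"fmt"

	"github.com/cockroachdb/apd/v3"
)

func main() {
	var src apd.Decimal
	src.SetInt64(12345)

	// Set deep-copies the coefficient; a bare `dst = src` struct copy is what
	// the NewDDecimal change above moves away from.
	var dst apd.Decimal
	dst.Set(&src)

	src.SetInt64(999)
	fmt.Println(dst.String(), src.String()) // 12345 999
}
```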
-func (a *DatumAlloc) NewDEnum(v tree.DEnum) *tree.DEnum { +func (a *DatumAlloc) NewDEnum(v DEnum) *DEnum { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.denumAlloc if len(*buf) == 0 { - *buf = make([]tree.DEnum, a.AllocSize) + *buf = make([]DEnum, a.AllocSize) } r := &(*buf)[0] *r = v @@ -209,13 +212,13 @@ func (a *DatumAlloc) NewDEnum(v tree.DEnum) *tree.DEnum { } // NewDBox2D allocates a DBox2D. -func (a *DatumAlloc) NewDBox2D(v tree.DBox2D) *tree.DBox2D { +func (a *DatumAlloc) NewDBox2D(v DBox2D) *DBox2D { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dbox2dAlloc if len(*buf) == 0 { - *buf = make([]tree.DBox2D, a.AllocSize) + *buf = make([]DBox2D, a.AllocSize) } r := &(*buf)[0] *r = v @@ -224,13 +227,13 @@ func (a *DatumAlloc) NewDBox2D(v tree.DBox2D) *tree.DBox2D { } // NewDGeography allocates a DGeography. -func (a *DatumAlloc) NewDGeography(v tree.DGeography) *tree.DGeography { +func (a *DatumAlloc) NewDGeography(v DGeography) *DGeography { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dgeographyAlloc if len(*buf) == 0 { - *buf = make([]tree.DGeography, a.AllocSize) + *buf = make([]DGeography, a.AllocSize) } r := &(*buf)[0] *r = v @@ -239,13 +242,13 @@ func (a *DatumAlloc) NewDGeography(v tree.DGeography) *tree.DGeography { } // NewDVoid allocates a new DVoid. -func (a *DatumAlloc) NewDVoid() *tree.DVoid { +func (a *DatumAlloc) NewDVoid() *DVoid { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dvoidAlloc if len(*buf) == 0 { - *buf = make([]tree.DVoid, a.AllocSize) + *buf = make([]DVoid, a.AllocSize) } r := &(*buf)[0] *buf = (*buf)[1:] @@ -255,8 +258,8 @@ func (a *DatumAlloc) NewDVoid() *tree.DVoid { // NewDGeographyEmpty allocates a new empty DGeography for unmarshalling. // After unmarshalling, DoneInitNewDGeo must be called to return unused // pre-allocated space to the DatumAlloc. -func (a *DatumAlloc) NewDGeographyEmpty() *tree.DGeography { - r := a.NewDGeography(tree.DGeography{}) +func (a *DatumAlloc) NewDGeographyEmpty() *DGeography { + r := a.NewDGeography(DGeography{}) a.giveBytesToEWKB(r.SpatialObjectRef()) return r } @@ -277,13 +280,13 @@ func (a *DatumAlloc) DoneInitNewDGeo(so *geopb.SpatialObject) { } // NewDGeometry allocates a DGeometry. -func (a *DatumAlloc) NewDGeometry(v tree.DGeometry) *tree.DGeometry { +func (a *DatumAlloc) NewDGeometry(v DGeometry) *DGeometry { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dgeometryAlloc if len(*buf) == 0 { - *buf = make([]tree.DGeometry, a.AllocSize) + *buf = make([]DGeometry, a.AllocSize) } r := &(*buf)[0] *r = v @@ -294,8 +297,8 @@ func (a *DatumAlloc) NewDGeometry(v tree.DGeometry) *tree.DGeometry { // NewDGeometryEmpty allocates a new empty DGeometry for unmarshalling. After // unmarshalling, DoneInitNewDGeo must be called to return unused // pre-allocated space to the DatumAlloc. -func (a *DatumAlloc) NewDGeometryEmpty() *tree.DGeometry { - r := a.NewDGeometry(tree.DGeometry{}) +func (a *DatumAlloc) NewDGeometryEmpty() *DGeometry { + r := a.NewDGeometry(DGeometry{}) a.giveBytesToEWKB(r.SpatialObjectRef()) return r } @@ -315,13 +318,13 @@ func (a *DatumAlloc) giveBytesToEWKB(so *geopb.SpatialObject) { } // NewDTime allocates a DTime. 
-func (a *DatumAlloc) NewDTime(v tree.DTime) *tree.DTime { +func (a *DatumAlloc) NewDTime(v DTime) *DTime { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dtimeAlloc if len(*buf) == 0 { - *buf = make([]tree.DTime, a.AllocSize) + *buf = make([]DTime, a.AllocSize) } r := &(*buf)[0] *r = v @@ -330,13 +333,13 @@ func (a *DatumAlloc) NewDTime(v tree.DTime) *tree.DTime { } // NewDTimeTZ allocates a DTimeTZ. -func (a *DatumAlloc) NewDTimeTZ(v tree.DTimeTZ) *tree.DTimeTZ { +func (a *DatumAlloc) NewDTimeTZ(v DTimeTZ) *DTimeTZ { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dtimetzAlloc if len(*buf) == 0 { - *buf = make([]tree.DTimeTZ, a.AllocSize) + *buf = make([]DTimeTZ, a.AllocSize) } r := &(*buf)[0] *r = v @@ -345,13 +348,13 @@ func (a *DatumAlloc) NewDTimeTZ(v tree.DTimeTZ) *tree.DTimeTZ { } // NewDTimestamp allocates a DTimestamp. -func (a *DatumAlloc) NewDTimestamp(v tree.DTimestamp) *tree.DTimestamp { +func (a *DatumAlloc) NewDTimestamp(v DTimestamp) *DTimestamp { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dtimestampAlloc if len(*buf) == 0 { - *buf = make([]tree.DTimestamp, a.AllocSize) + *buf = make([]DTimestamp, a.AllocSize) } r := &(*buf)[0] *r = v @@ -360,13 +363,13 @@ func (a *DatumAlloc) NewDTimestamp(v tree.DTimestamp) *tree.DTimestamp { } // NewDTimestampTZ allocates a DTimestampTZ. -func (a *DatumAlloc) NewDTimestampTZ(v tree.DTimestampTZ) *tree.DTimestampTZ { +func (a *DatumAlloc) NewDTimestampTZ(v DTimestampTZ) *DTimestampTZ { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dtimestampTzAlloc if len(*buf) == 0 { - *buf = make([]tree.DTimestampTZ, a.AllocSize) + *buf = make([]DTimestampTZ, a.AllocSize) } r := &(*buf)[0] *r = v @@ -375,13 +378,13 @@ func (a *DatumAlloc) NewDTimestampTZ(v tree.DTimestampTZ) *tree.DTimestampTZ { } // NewDInterval allocates a DInterval. -func (a *DatumAlloc) NewDInterval(v tree.DInterval) *tree.DInterval { +func (a *DatumAlloc) NewDInterval(v DInterval) *DInterval { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dintervalAlloc if len(*buf) == 0 { - *buf = make([]tree.DInterval, a.AllocSize) + *buf = make([]DInterval, a.AllocSize) } r := &(*buf)[0] *r = v @@ -390,13 +393,13 @@ func (a *DatumAlloc) NewDInterval(v tree.DInterval) *tree.DInterval { } // NewDUuid allocates a DUuid. -func (a *DatumAlloc) NewDUuid(v tree.DUuid) *tree.DUuid { +func (a *DatumAlloc) NewDUuid(v DUuid) *DUuid { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.duuidAlloc if len(*buf) == 0 { - *buf = make([]tree.DUuid, a.AllocSize) + *buf = make([]DUuid, a.AllocSize) } r := &(*buf)[0] *r = v @@ -405,13 +408,13 @@ func (a *DatumAlloc) NewDUuid(v tree.DUuid) *tree.DUuid { } // NewDIPAddr allocates a DIPAddr. -func (a *DatumAlloc) NewDIPAddr(v tree.DIPAddr) *tree.DIPAddr { +func (a *DatumAlloc) NewDIPAddr(v DIPAddr) *DIPAddr { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dipnetAlloc if len(*buf) == 0 { - *buf = make([]tree.DIPAddr, a.AllocSize) + *buf = make([]DIPAddr, a.AllocSize) } r := &(*buf)[0] *r = v @@ -420,13 +423,13 @@ func (a *DatumAlloc) NewDIPAddr(v tree.DIPAddr) *tree.DIPAddr { } // NewDJSON allocates a DJSON. 
-func (a *DatumAlloc) NewDJSON(v tree.DJSON) *tree.DJSON { +func (a *DatumAlloc) NewDJSON(v DJSON) *DJSON { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.djsonAlloc if len(*buf) == 0 { - *buf = make([]tree.DJSON, a.AllocSize) + *buf = make([]DJSON, a.AllocSize) } r := &(*buf)[0] *r = v @@ -435,13 +438,13 @@ func (a *DatumAlloc) NewDJSON(v tree.DJSON) *tree.DJSON { } // NewDTuple allocates a DTuple. -func (a *DatumAlloc) NewDTuple(v tree.DTuple) *tree.DTuple { +func (a *DatumAlloc) NewDTuple(v DTuple) *DTuple { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.dtupleAlloc if len(*buf) == 0 { - *buf = make([]tree.DTuple, a.AllocSize) + *buf = make([]DTuple, a.AllocSize) } r := &(*buf)[0] *r = v @@ -450,13 +453,13 @@ func (a *DatumAlloc) NewDTuple(v tree.DTuple) *tree.DTuple { } // NewDOid allocates a DOid. -func (a *DatumAlloc) NewDOid(v tree.DOid) tree.Datum { +func (a *DatumAlloc) NewDOid(v DOid) Datum { if a.AllocSize == 0 { a.AllocSize = defaultDatumAllocSize } buf := &a.doidAlloc if len(*buf) == 0 { - *buf = make([]tree.DOid, a.AllocSize) + *buf = make([]DOid, a.AllocSize) } r := &(*buf)[0] *r = v diff --git a/pkg/sql/sem/tree/decimal.go b/pkg/sql/sem/tree/decimal.go index b2213f77add7..0ed19db09c65 100644 --- a/pkg/sql/sem/tree/decimal.go +++ b/pkg/sql/sem/tree/decimal.go @@ -14,7 +14,7 @@ import ( "fmt" "math" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" ) diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index d99b075c8169..934c198d3577 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -14,13 +14,12 @@ import ( "context" "fmt" "math" - "math/big" "regexp" "strings" "time" "unicode/utf8" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/keys" @@ -75,8 +74,8 @@ var ( // ErrShiftArgOutOfRange is reported when a shift argument is out of range. ErrShiftArgOutOfRange = pgerror.New(pgcode.InvalidParameterValue, "shift argument out of range") - big10E6 = big.NewInt(1e6) - big10E10 = big.NewInt(1e10) + big10E6 = apd.NewBigInt(1e6) + big10E10 = apd.NewBigInt(1e10) ) // NewCannotMixBitArraySizesError creates an error for the case when a bitwise @@ -495,19 +494,19 @@ func initNonArrayToNonArrayConcatenation() { Volatility: volatility, }) } - fromTypeToVolatility := make(map[types.Family]Volatility) - for _, cast := range validCasts { - if cast.to == types.StringFamily { - fromTypeToVolatility[cast.from] = cast.volatility + fromTypeToVolatility := make(map[oid.Oid]Volatility) + ForEachCast(func(src, tgt oid.Oid) { + if tgt == oid.T_text { + fromTypeToVolatility[src] = castMap[src][tgt].volatility } - } + }) // We allow tuple + string concatenation, as well as any scalar types. for _, t := range append([]*types.T{types.AnyTuple}, types.Scalar...) { // Do not re-add String+String or String+Bytes, as they already exist // and have predefined correct behavior. 
if t != types.String && t != types.Bytes { - addConcat(t, types.String, fromTypeToVolatility[t.Family()]) - addConcat(types.String, t, fromTypeToVolatility[t.Family()]) + addConcat(t, types.String, fromTypeToVolatility[t.Oid()]) + addConcat(types.String, t, fromTypeToVolatility[t.Oid()]) } } } @@ -1429,10 +1428,11 @@ var BinOps = map[BinaryOperatorSymbol]binOpOverload{ if rInt == 0 { return nil, ErrDivByZero } - div := ctx.getTmpDec().SetInt64(int64(rInt)) + var div apd.Decimal + div.SetInt64(int64(rInt)) dd := &DDecimal{} dd.SetInt64(int64(MustBeDInt(left))) - _, err := DecimalCtx.Quo(&dd.Decimal, &dd.Decimal, div) + _, err := DecimalCtx.Quo(&dd.Decimal, &dd.Decimal, &div) return dd, err }, Volatility: VolatilityImmutable, @@ -3115,18 +3115,25 @@ type EvalDatabase interface { ctx context.Context, specifier HasPrivilegeSpecifier, user security.SQLUsername, - kind privilege.Kind, + priv privilege.Privilege, ) (bool, error) } // HasPrivilegeSpecifier specifies an object to lookup privilege for. +// Only one of DatabaseName, DatabaseOID, TableName, TableOID is filled. type HasPrivilegeSpecifier struct { - // Only one of these is filled. + + // Database privilege + DatabaseName *string + DatabaseOID *oid.Oid + + // Table privilege TableName *string TableOID *oid.Oid - // Only one of these is filled. - // Only used if TableName or TableOID is specified. + // Column privilege + // Requires TableName or TableOID. + // Only one of ColumnName, ColumnAttNum is filled. ColumnName *Name ColumnAttNum *uint32 } @@ -3579,6 +3586,9 @@ type EvalContext struct { Planner EvalPlanner + // Not using sql.JobExecContext type to avoid cycle dependency with sql package + JobExecContext interface{} + PrivilegedAccessor PrivilegedAccessor SessionAccessor EvalSessionAccessor @@ -3602,7 +3612,6 @@ type EvalContext struct { DB *kv.DB ReCache *RegexpCache - tmpDec apd.Decimal // TODO(mjibson): remove prepareOnly in favor of a 2-step prepare-exec solution // that is also able to save the plan to skip work during the exec step. @@ -3773,7 +3782,7 @@ func TimestampToDecimal(ts hlc.Timestamp) apd.Decimal { val := &res.Coeff val.SetInt64(ts.WallTime) val.Mul(val, big10E10) - val.Add(val, big.NewInt(int64(ts.Logical))) + val.Add(val, apd.NewBigInt(int64(ts.Logical))) // val must be positive. If it was set to a negative value above, // transfer the sign to res.Negative. @@ -3798,7 +3807,7 @@ func DecimalToInexactDTimestampTZ(d *DDecimal) (*DTimestampTZ, error) { } func decimalToHLC(d *DDecimal) (hlc.Timestamp, error) { - var coef big.Int + var coef apd.BigInt coef.Set(&d.Decimal.Coeff) // The physical portion of the HLC is stored shifted up by 10^10, so shift // it down and clear out the logical component. @@ -3931,10 +3940,6 @@ func (ctx *EvalContext) Ctx() context.Context { return ctx.Context } -func (ctx *EvalContext) getTmpDec() *apd.Decimal { - return &ctx.tmpDec -} - // Eval implements the TypedExpr interface. 
func (expr *AndExpr) Eval(ctx *EvalContext) (Datum, error) { left, err := expr.Left.(TypedExpr).Eval(ctx) diff --git a/pkg/sql/sem/tree/insert.go b/pkg/sql/sem/tree/insert.go index 77461934c5fa..65dda3e762af 100644 --- a/pkg/sql/sem/tree/insert.go +++ b/pkg/sql/sem/tree/insert.go @@ -52,6 +52,10 @@ func (node *Insert) Format(ctx *FmtCtx) { } if node.OnConflict != nil && !node.OnConflict.IsUpsertAlias() { ctx.WriteString(" ON CONFLICT") + if node.OnConflict.Constraint != "" { + ctx.WriteString(" ON CONSTRAINT ") + ctx.FormatNode(&node.OnConflict.Constraint) + } if len(node.OnConflict.Columns) > 0 { ctx.WriteString(" (") ctx.FormatNode(&node.OnConflict.Columns) @@ -90,7 +94,14 @@ func (node *Insert) DefaultValues() bool { // uses the primary key for as the conflict index and the values being inserted // for Exprs. type OnConflict struct { - Columns NameList + // At most one of Columns and Constraint will be set at once. + // Columns is the list of arbiter columns, if set, that the user specified + // in the ON CONFLICT (columns) list. + Columns NameList + // Constraint is the name of a table constraint that the user specified to + // get the list of arbiter columns from, in the ON CONFLICT ON CONSTRAINT + // form. + Constraint Name ArbiterPredicate Expr Exprs UpdateExprs Where *Where @@ -99,5 +110,6 @@ type OnConflict struct { // IsUpsertAlias returns true if the UPSERT syntactic sugar was used. func (oc *OnConflict) IsUpsertAlias() bool { - return oc != nil && oc.Columns == nil && oc.ArbiterPredicate == nil && oc.Exprs == nil && oc.Where == nil && !oc.DoNothing + return oc != nil && oc.Columns == nil && oc.Constraint == "" && + oc.ArbiterPredicate == nil && oc.Exprs == nil && oc.Where == nil && !oc.DoNothing } diff --git a/pkg/sql/sem/tree/pretty.go b/pkg/sql/sem/tree/pretty.go index 75bf11a7a900..de6f55c406e9 100644 --- a/pkg/sql/sem/tree/pretty.go +++ b/pkg/sql/sem/tree/pretty.go @@ -65,7 +65,7 @@ type PrettyCfg struct { // configuration. func DefaultPrettyCfg() PrettyCfg { return PrettyCfg{ - LineWidth: 60, + LineWidth: DefaultLineWidth, Simplify: true, TabWidth: 4, UseTabs: true, @@ -95,6 +95,26 @@ const ( PrettyAlignAndExtraIndent = 3 ) +// CaseMode directs which casing mode to use. +type CaseMode int + +const ( + // LowerCase transforms case-insensitive strings (like SQL keywords) to lowercase. + LowerCase CaseMode = 0 + // UpperCase transforms case-insensitive strings (like SQL keywords) to uppercase. + UpperCase CaseMode = 1 +) + +// LineWidthMode directs which mode of line width to use. +type LineWidthMode int + +const ( + // DefaultLineWidth is the line width used with the default pretty-printing configuration. + DefaultLineWidth = 60 + // ConsoleLineWidth is the line width used on the frontend console. + ConsoleLineWidth = 108 +) + // keywordWithText returns a pretty.Keyword with left and/or right // sides concat'd as a pretty.Text. 
func (p *PrettyCfg) keywordWithText(left, keyword, right string) pretty.Doc { @@ -955,6 +975,9 @@ func (node *Insert) doc(p *PrettyCfg) pretty.Doc { if node.OnConflict != nil && !node.OnConflict.IsUpsertAlias() { cond := pretty.Nil + if len(node.OnConflict.Constraint) > 0 { + cond = p.nestUnder(pretty.Text("ON CONSTRAINT"), p.Doc(&node.OnConflict.Constraint)) + } if len(node.OnConflict.Columns) > 0 { cond = p.bracket("(", p.Doc(&node.OnConflict.Columns), ")") } diff --git a/pkg/sql/sem/tree/select.go b/pkg/sql/sem/tree/select.go index 49f46b98e66b..07b78cd04dea 100644 --- a/pkg/sql/sem/tree/select.go +++ b/pkg/sql/sem/tree/select.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -270,7 +271,7 @@ func (node *StatementSource) Format(ctx *FmtCtx) { } // IndexID is a custom type for IndexDescriptor IDs. -type IndexID uint32 +type IndexID = catid.IndexID // IndexFlags represents "@" or "@{param[,param]}" where // param is one of: diff --git a/pkg/sql/sem/tree/table_ref.go b/pkg/sql/sem/tree/table_ref.go index db4ac7a49dc2..5bee33416a65 100644 --- a/pkg/sql/sem/tree/table_ref.go +++ b/pkg/sql/sem/tree/table_ref.go @@ -10,11 +10,13 @@ package tree +import "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" + // ID is a custom type for {Database,Table}Descriptor IDs. -type ID uint32 +type ID = catid.ColumnID // ColumnID is a custom type for ColumnDescriptor IDs. -type ColumnID uint32 +type ColumnID = catid.ColumnID // TableRef represents a numeric table reference. // (Syntax !NNN in SQL.) @@ -56,9 +58,3 @@ func (n *TableRef) String() string { return AsString(n) } // tableExpr implements the TableExpr interface. func (n *TableRef) tableExpr() {} - -// SafeValue implements the redact.SafeValue interface. -func (ID) SafeValue() {} - -// SafeValue implements the redact.SafeValue interface. 
-func (ColumnID) SafeValue() {} diff --git a/pkg/sql/sem/tree/testdata/pg_cast_dump.csv b/pkg/sql/sem/tree/testdata/pg_cast_dump.csv new file mode 100644 index 000000000000..db4a657872ad --- /dev/null +++ b/pkg/sql/sem/tree/testdata/pg_cast_dump.csv @@ -0,0 +1,156 @@ +castsource|casttarget|provolatile|proleakproof|castcontext|pg_version +16|23|i|f|e|13.5 +16|25|i|f|a|13.5 +16|1042|i|f|a|13.5 +16|1043|i|f|a|13.5 +18|23|i|f|e|13.5 +18|25|i|f|i|13.5 +18|1042|i|f|a|13.5 +18|1043|i|f|a|13.5 +19|25|i|f|i|13.5 +19|1042|i|f|a|13.5 +19|1043|i|f|a|13.5 +20|21|i|f|a|13.5 +20|23|i|f|a|13.5 +20|24|i|f|i|13.5 +20|26|i|f|i|13.5 +20|700|i|f|i|13.5 +20|701|i|f|i|13.5 +20|790|s|f|a|13.5 +20|1560|i|f|e|13.5 +20|1700|i|f|i|13.5 +20|2202|i|f|i|13.5 +20|2203|i|f|i|13.5 +20|2204|i|f|i|13.5 +20|2205|i|f|i|13.5 +20|2206|i|f|i|13.5 +20|3734|i|f|i|13.5 +20|3769|i|f|i|13.5 +20|4089|i|f|i|13.5 +20|4096|i|f|i|13.5 +20|4191|i|f|i|13.5 +21|20|i|f|i|13.5 +21|23|i|f|i|13.5 +21|24|i|f|i|13.5 +21|26|i|f|i|13.5 +21|700|i|f|i|13.5 +21|701|i|f|i|13.5 +21|1700|i|f|i|13.5 +21|2202|i|f|i|13.5 +21|2203|i|f|i|13.5 +21|2204|i|f|i|13.5 +21|2205|i|f|i|13.5 +21|2206|i|f|i|13.5 +21|3734|i|f|i|13.5 +21|3769|i|f|i|13.5 +21|4089|i|f|i|13.5 +21|4096|i|f|i|13.5 +21|4191|i|f|i|13.5 +23|16|i|f|e|13.5 +23|18|i|f|e|13.5 +23|20|i|f|i|13.5 +23|21|i|f|a|13.5 +23|700|i|f|i|13.5 +23|701|i|f|i|13.5 +23|790|s|f|a|13.5 +23|1560|i|f|e|13.5 +23|1700|i|f|i|13.5 +24|20|i|f|a|13.5 +25|18|i|f|a|13.5 +25|19|i|f|i|13.5 +25|142|s|f|e|13.5 +25|2205|s|f|i|13.5 +26|20|i|f|a|13.5 +600|603|i|f|a|13.5 +601|600|i|f|e|13.5 +602|600|i|f|e|13.5 +602|604|i|f|a|13.5 +603|600|i|f|e|13.5 +603|601|i|f|e|13.5 +603|604|i|f|a|13.5 +603|718|i|f|e|13.5 +604|600|i|f|e|13.5 +604|602|i|f|a|13.5 +604|603|i|f|e|13.5 +604|718|i|f|e|13.5 +650|25|i|f|a|13.5 +650|1042|i|f|a|13.5 +650|1043|i|f|a|13.5 +700|20|i|f|a|13.5 +700|21|i|f|a|13.5 +700|23|i|f|a|13.5 +700|701|i|f|i|13.5 +700|1700|i|f|a|13.5 +701|20|i|f|a|13.5 +701|21|i|f|a|13.5 +701|23|i|f|a|13.5 +701|700|i|f|a|13.5 +701|1700|i|f|a|13.5 +718|600|i|f|e|13.5 +718|603|i|f|e|13.5 +718|604|i|f|e|13.5 +774|829|i|f|i|13.5 +790|1700|s|f|a|13.5 +829|774|i|f|i|13.5 +869|25|i|f|a|13.5 +869|650|i|f|a|13.5 +869|1042|i|f|a|13.5 +869|1043|i|f|a|13.5 +1042|18|i|f|a|13.5 +1042|19|i|f|i|13.5 +1042|25|i|f|i|13.5 +1042|142|s|f|e|13.5 +1042|1042|i|f|i|13.5 +1042|1043|i|f|i|13.5 +1043|18|i|f|a|13.5 +1043|19|i|f|i|13.5 +1043|142|s|f|e|13.5 +1043|1043|i|f|i|13.5 +1043|2205|s|f|i|13.5 +1082|1114|i|f|i|13.5 +1082|1184|s|f|i|13.5 +1083|1083|i|f|i|13.5 +1083|1186|i|f|i|13.5 +1083|1266|s|f|i|13.5 +1114|1082|i|f|a|13.5 +1114|1083|i|f|a|13.5 +1114|1114|i|f|i|13.5 +1114|1184|s|f|i|13.5 +1184|1082|s|f|a|13.5 +1184|1083|s|f|a|13.5 +1184|1114|s|f|a|13.5 +1184|1184|i|f|i|13.5 +1184|1266|s|f|a|13.5 +1186|1083|i|f|a|13.5 +1186|1186|i|f|i|13.5 +1266|1083|i|f|a|13.5 +1266|1266|i|f|i|13.5 +1560|20|i|f|e|13.5 +1560|23|i|f|e|13.5 +1560|1560|i|f|i|13.5 +1562|1562|i|f|i|13.5 +1700|20|i|f|a|13.5 +1700|21|i|f|a|13.5 +1700|23|i|f|a|13.5 +1700|700|i|f|i|13.5 +1700|701|i|f|i|13.5 +1700|790|s|f|a|13.5 +1700|1700|i|f|i|13.5 +2202|20|i|f|a|13.5 +2203|20|i|f|a|13.5 +2204|20|i|f|a|13.5 +2205|20|i|f|a|13.5 +2206|20|i|f|a|13.5 +3734|20|i|f|a|13.5 +3769|20|i|f|a|13.5 +3802|16|i|f|e|13.5 +3802|20|i|f|e|13.5 +3802|21|i|f|e|13.5 +3802|23|i|f|e|13.5 +3802|700|i|f|e|13.5 +3802|701|i|f|e|13.5 +3802|1700|i|f|e|13.5 +4089|20|i|f|a|13.5 +4096|20|i|f|a|13.5 +4191|20|i|f|a|13.5 +5069|28|i|f|e|13.5 diff --git a/pkg/sql/sem/tree/testdata/pg_cast_provolatile_dump.csv 
b/pkg/sql/sem/tree/testdata/pg_cast_provolatile_dump.csv deleted file mode 100644 index 4b09dcc22399..000000000000 --- a/pkg/sql/sem/tree/testdata/pg_cast_provolatile_dump.csv +++ /dev/null @@ -1,153 +0,0 @@ -castsource|casttarget|provolatile|proleakproof -20|21|i|f -20|23|i|f -20|700|i|f -20|701|i|f -20|1700|i|f -21|20|i|f -21|23|i|f -21|700|i|f -21|701|i|f -21|1700|i|f -23|20|i|f -23|21|i|f -23|700|i|f -23|701|i|f -23|1700|i|f -700|20|i|f -700|21|i|f -700|23|i|f -700|701|i|f -700|1700|i|f -701|20|i|f -701|21|i|f -701|23|i|f -701|700|i|f -701|1700|i|f -1700|20|i|f -1700|21|i|f -1700|23|i|f -1700|700|i|f -1700|701|i|f -790|1700|s|f -1700|790|s|f -23|790|s|f -20|790|s|f -23|16|i|f -16|23|i|f -20|26|i|f -21|26|i|f -26|20|i|f -20|24|i|f -21|24|i|f -24|20|i|f -20|2202|i|f -21|2202|i|f -2202|20|i|f -20|2203|i|f -21|2203|i|f -2203|20|i|f -20|2204|i|f -21|2204|i|f -2204|20|i|f -20|2205|i|f -21|2205|i|f -2205|20|i|f -20|2206|i|f -21|2206|i|f -2206|20|i|f -20|3734|i|f -21|3734|i|f -3734|20|i|f -20|3769|i|f -21|3769|i|f -3769|20|i|f -25|2205|s|f -1043|2205|s|f -20|4096|i|f -21|4096|i|f -4096|20|i|f -20|4089|i|f -21|4089|i|f -4089|20|i|f -1042|25|i|f -1042|1043|i|f -18|25|i|f -18|1042|i|f -18|1043|i|f -19|25|i|f -19|1042|i|f -19|1043|i|f -25|18|i|f -1042|18|i|f -1043|18|i|f -25|19|i|f -1042|19|i|f -1043|19|i|f -18|23|i|f -23|18|i|f -702|1082|s|f -702|1083|s|f -702|1114|s|f -702|1184|i|f -703|1186|i|f -1082|1114|i|f -1082|1184|s|f -1083|1186|i|f -1083|1266|s|f -1114|702|s|f -1114|1082|i|f -1114|1083|i|f -1114|1184|s|f -1184|702|i|f -1184|1082|s|f -1184|1083|s|f -1184|1114|s|f -1184|1266|s|f -1186|703|i|f -1186|1083|i|f -1266|1083|i|f -600|603|i|f -601|600|i|f -602|600|i|f -602|604|i|f -603|600|i|f -603|601|i|f -603|604|i|f -603|718|i|f -604|600|i|f -604|602|i|f -604|603|i|f -604|718|i|f -718|600|i|f -718|603|i|f -718|604|i|f -829|774|i|f -774|829|i|f -869|650|i|f -20|1560|i|f -23|1560|i|f -1560|20|i|f -1560|23|i|f -650|25|i|f -869|25|i|f -16|25|i|f -25|142|s|f -650|1043|i|f -869|1043|i|f -16|1043|i|f -1043|142|s|f -650|1042|i|f -869|1042|i|f -16|1042|i|f -1042|142|s|f -1042|1042|i|f -1043|1043|i|f -1083|1083|i|f -1114|1114|i|f -1184|1184|i|f -1186|1186|i|f -1266|1266|i|f -1560|1560|i|f -1562|1562|i|f -1700|1700|i|f diff --git a/pkg/sql/sem/tree/type_check.go b/pkg/sql/sem/tree/type_check.go index f728a828f175..2400747e6a1e 100644 --- a/pkg/sql/sem/tree/type_check.go +++ b/pkg/sql/sem/tree/type_check.go @@ -490,25 +490,15 @@ func resolveCast( return nil default: - var v Volatility - var hint string - c, ok := lookupCast(castFrom, castTo, intervalStyleEnabled, dateStyleEnabled) - if ok && c.volatility != volatilityTODO { - // If the volatility has been set in castMap, use it. - v = c.volatility - hint = c.volatilityHint - } else if cast := lookupCastInfo(fromFamily, toFamily, intervalStyleEnabled, dateStyleEnabled); cast != nil { - // Otherwise, fallback to the volatility in castInfo. 
- v = cast.volatility - hint = cast.volatilityHint - } else { + cast, ok := lookupCast(castFrom, castTo, intervalStyleEnabled, dateStyleEnabled) + if !ok { return invalidCastError(castFrom, castTo) } - if !allowStable && v >= VolatilityStable { + if !allowStable && cast.volatility >= VolatilityStable { err := NewContextDependentOpsNotAllowedError(context) err = pgerror.Wrapf(err, pgcode.InvalidParameterValue, "%s::%s", castFrom, castTo) - if hint != "" { - err = errors.WithHint(err, hint) + if cast.volatilityHint != "" { + err = errors.WithHint(err, cast.volatilityHint) } return err } diff --git a/pkg/sql/sem/tree/window_funcs_test.go b/pkg/sql/sem/tree/window_funcs_test.go index 3b488d7eb0b1..bb06d9eef638 100644 --- a/pkg/sql/sem/tree/window_funcs_test.go +++ b/pkg/sql/sem/tree/window_funcs_test.go @@ -17,7 +17,7 @@ import ( "math/rand" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/leaktest" diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index e3083b01b363..0ac165cb9c2f 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -82,7 +82,7 @@ func (p *planner) SetClusterSetting( ) (planNode, error) { name := strings.ToLower(n.Name) st := p.EvalContext().Settings - v, ok := settings.Lookup(name, settings.LookupForLocalAccess) + v, ok := settings.Lookup(name, settings.LookupForLocalAccess, p.ExecCfg().Codec.ForSystemTenant()) if !ok { return nil, errors.Errorf("unknown cluster setting '%s'", name) } diff --git a/pkg/sql/set_var.go b/pkg/sql/set_var.go index 64b8f3611533..c355849a02a0 100644 --- a/pkg/sql/set_var.go +++ b/pkg/sql/set_var.go @@ -15,7 +15,7 @@ import ( "strings" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/paramparse" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" diff --git a/pkg/sql/show_cluster_setting.go b/pkg/sql/show_cluster_setting.go index 375fe6144933..511cb122054c 100644 --- a/pkg/sql/show_cluster_setting.go +++ b/pkg/sql/show_cluster_setting.go @@ -119,7 +119,9 @@ func (p *planner) ShowClusterSetting( ) (planNode, error) { name := strings.ToLower(n.Name) st := p.ExecCfg().Settings - val, ok := settings.Lookup(name, settings.LookupForLocalAccess) + val, ok := settings.Lookup( + name, settings.LookupForLocalAccess, p.ExecCfg().Codec.ForSystemTenant(), + ) if !ok { return nil, errors.Errorf("unknown setting: %q", name) } diff --git a/pkg/sql/show_create.go b/pkg/sql/show_create.go index 9ebe7cd5c4ea..b71c36354bd4 100644 --- a/pkg/sql/show_create.go +++ b/pkg/sql/show_create.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" ) @@ -72,7 +71,7 @@ func ShowCreateTable( lCtx simpleSchemaResolver, displayOptions ShowCreateDisplayOptions, ) (string, error) { - a := &rowenc.DatumAlloc{} + a := &tree.DatumAlloc{} f := p.ExtendedEvalContext().FmtCtx(tree.FmtSimple) f.WriteString("CREATE ") diff --git a/pkg/sql/show_create_clauses.go b/pkg/sql/show_create_clauses.go index e1bfea86ba50..776326a127f0 
100644 --- a/pkg/sql/show_create_clauses.go +++ b/pkg/sql/show_create_clauses.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" @@ -56,7 +57,7 @@ func selectComment(ctx context.Context, p PlanHookState, tableID descpb.ID) (tc var ok bool for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { row := it.Cur() - commentType := int(tree.MustBeDInt(row[0])) + commentType := keys.CommentType(tree.MustBeDInt(row[0])) switch commentType { case keys.TableCommentType, keys.ColumnCommentType, keys.IndexCommentType: subID := int(tree.MustBeDInt(row[2])) @@ -349,11 +350,11 @@ func showForeignKeyConstraint( buf.WriteByte(' ') buf.WriteString(fk.Match.String()) } - if fk.OnDelete != descpb.ForeignKeyReference_NO_ACTION { + if fk.OnDelete != catpb.ForeignKeyAction_NO_ACTION { buf.WriteString(" ON DELETE ") buf.WriteString(fk.OnDelete.String()) } - if fk.OnUpdate != descpb.ForeignKeyReference_NO_ACTION { + if fk.OnUpdate != catpb.ForeignKeyAction_NO_ACTION { buf.WriteString(" ON UPDATE ") buf.WriteString(fk.OnUpdate.String()) } @@ -427,7 +428,7 @@ func showCreateLocality(desc catalog.TableDescriptor, f *tree.FmtCtx) error { // ShowCreatePartitioning returns a PARTITION BY clause for the specified // index, if applicable. func ShowCreatePartitioning( - a *rowenc.DatumAlloc, + a *tree.DatumAlloc, codec keys.SQLCodec, tableDesc catalog.TableDescriptor, idx catalog.Index, @@ -450,7 +451,7 @@ func ShowCreatePartitioning( // Do not print PARTITION ALL BY if we are a REGIONAL BY ROW table. if c := tableDesc.GetLocalityConfig(); c != nil { switch c.Locality.(type) { - case *descpb.TableDescriptor_LocalityConfig_RegionalByRow_: + case *catpb.LocalityConfig_RegionalByRow_: return nil } } @@ -563,6 +564,9 @@ func showConstraintClause( f *tree.FmtCtx, ) error { for _, e := range desc.AllActiveAndInactiveChecks() { + if e.Hidden { + continue + } f.WriteString(",\n\t") if len(e.Name) > 0 { f.WriteString("CONSTRAINT ") diff --git a/pkg/sql/show_test.go b/pkg/sql/show_test.go index 0c37b98d6423..465d9d76272f 100644 --- a/pkg/sql/show_test.go +++ b/pkg/sql/show_test.go @@ -270,8 +270,7 @@ func TestShowCreateTable(t *testing.T) { rowid INT8 NOT VISIBLE NOT NULL DEFAULT unique_rowid(), CONSTRAINT %[1]s_pkey PRIMARY KEY (rowid ASC), INDEX t12_a_idx (a ASC) USING HASH WITH BUCKET_COUNT = 8, - FAMILY "primary" (a, rowid), - CONSTRAINT check_crdb_internal_a_shard_8 CHECK (crdb_internal_a_shard_8 IN (0:::INT8, 1:::INT8, 2:::INT8, 3:::INT8, 4:::INT8, 5:::INT8, 6:::INT8, 7:::INT8)) + FAMILY "primary" (a, rowid) )`, }, } @@ -874,12 +873,14 @@ func TestShowSessionPrivileges(t *testing.T) { sqlDBroot := sqlutils.MakeSQLRunner(rawSQLDBroot) defer s.Stopper().Stop(context.Background()) - // Create three users: one with no special permissions, one with the - // VIEWACTIVITY role option, and one admin. We'll check that the VIEWACTIVITY + // Create four users: one with no special permissions, one with the + // VIEWACTIVITY role option, one with VIEWACTIVITYREDACTED option, + // and one admin. We'll check that the VIEWACTIVITY, VIEWACTIVITYREDACTED // users and the admin can see all sessions and the unpermissioned user can // only see their own session. 
_ = sqlDBroot.Exec(t, `CREATE USER noperms`) _ = sqlDBroot.Exec(t, `CREATE USER viewactivity VIEWACTIVITY`) + _ = sqlDBroot.Exec(t, `CREATE USER viewactivityredacted VIEWACTIVITYREDACTED`) _ = sqlDBroot.Exec(t, `CREATE USER adminuser`) _ = sqlDBroot.Exec(t, `GRANT admin TO adminuser`) @@ -892,6 +893,7 @@ func TestShowSessionPrivileges(t *testing.T) { users := []user{ {"noperms", false, nil}, {"viewactivity", true, nil}, + {"viewactivityredacted", true, nil}, {"adminuser", true, nil}, } for i, tc := range users { diff --git a/pkg/sql/span/BUILD.bazel b/pkg/sql/span/BUILD.bazel index 186a0b64d900..27c408a2a501 100644 --- a/pkg/sql/span/BUILD.bazel +++ b/pkg/sql/span/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//pkg/sql/opt/constraint", "//pkg/sql/opt/exec", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/types", "//pkg/util", diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index 41b3d0784439..ef0d3fa2f718 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/opt/constraint" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" @@ -41,7 +42,7 @@ type Builder struct { // KeyPrefix is the prefix of keys generated by the builder. KeyPrefix []byte - alloc rowenc.DatumAlloc + alloc tree.DatumAlloc neededFamilies []descpb.FamilyID } @@ -65,19 +66,18 @@ func MakeBuilder( table: table, index: index, indexColTypes: s.indexColTypes, - KeyPrefix: rowenc.MakeIndexKeyPrefix(codec, table, index.GetID()), + KeyPrefix: rowenc.MakeIndexKeyPrefix(codec, table.GetID(), index.GetID()), neededFamilies: nil, } - var columnIDs descpb.ColumnIDs - columnIDs, s.indexColDirs = catalog.FullIndexColumnIDs(index) - if cap(s.indexColTypes) < len(columnIDs) { - s.indexColTypes = make([]*types.T, len(columnIDs)) + s.indexColDirs = s.table.IndexFullColumnDirections(index) + columns := s.table.IndexFullColumns(index) + if cap(s.indexColTypes) < len(columns) { + s.indexColTypes = make([]*types.T, len(columns)) } else { - s.indexColTypes = s.indexColTypes[:len(columnIDs)] + s.indexColTypes = s.indexColTypes[:len(columns)] } - for i, colID := range columnIDs { - col, _ := table.FindColumnWithID(colID) + for i, col := range columns { // TODO (rohany): do I need to look at table columns with mutations here as well? 
if col != nil && col.Public() { s.indexColTypes[i] = col.GetType() @@ -186,7 +186,7 @@ func (s *Builder) SpanFromEncDatumsWithRange( func (s *Builder) SpanFromDatumRow( values tree.Datums, prefixLen int, colMap catalog.TableColMap, ) (_ roachpb.Span, containsNull bool, _ error) { - return rowenc.EncodePartialIndexSpan(s.table, s.index, prefixLen, colMap, values, s.KeyPrefix) + return rowenc.EncodePartialIndexSpan(s.index, prefixLen, colMap, values, s.KeyPrefix) } // SpanToPointSpan converts a span into a span that represents a point lookup on a @@ -381,7 +381,7 @@ func (s *Builder) encodeConstraintKey( } } - key, err = rowenc.EncodeTableKey(key, val, dir) + key, err = keyside.Encode(key, val, dir) if err != nil { return nil, false, err } diff --git a/pkg/sql/split.go b/pkg/sql/split.go index b47b3a964e4b..26e9ac2de317 100644 --- a/pkg/sql/split.go +++ b/pkg/sql/split.go @@ -96,10 +96,8 @@ func getRowKey( for i := range values { colMap.Set(index.GetKeyColumnID(i), i) } - prefix := rowenc.MakeIndexKeyPrefix(codec, tableDesc, index.GetID()) - key, _, err := rowenc.EncodePartialIndexKey( - tableDesc, index, len(values), colMap, values, prefix, - ) + prefix := rowenc.MakeIndexKeyPrefix(codec, tableDesc.GetID(), index.GetID()) + key, _, err := rowenc.EncodePartialIndexKey(index, len(values), colMap, values, prefix) if err != nil { return nil, err } diff --git a/pkg/sql/sqlerrors/errors.go b/pkg/sql/sqlerrors/errors.go index e0ccc2345533..95289770ec46 100644 --- a/pkg/sql/sqlerrors/errors.go +++ b/pkg/sql/sqlerrors/errors.go @@ -274,6 +274,11 @@ func IsUndefinedRelationError(err error) bool { return errHasCode(err, pgcode.UndefinedTable) } +// IsUndefinedDatabaseError checks whether this is an undefined database error. +func IsUndefinedDatabaseError(err error) bool { + return errHasCode(err, pgcode.UndefinedDatabase) +} + func errHasCode(err error, code ...pgcode.Code) bool { pgCode := pgerror.GetPGCode(err) for _, c := range code { diff --git a/pkg/sql/sqlinstance/instancestorage/BUILD.bazel b/pkg/sql/sqlinstance/instancestorage/BUILD.bazel index 088cf72056ab..c6c02f73d2f2 100644 --- a/pkg/sql/sqlinstance/instancestorage/BUILD.bazel +++ b/pkg/sql/sqlinstance/instancestorage/BUILD.bazel @@ -20,8 +20,8 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", - "//pkg/sql/row", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/valueside", "//pkg/sql/sem/tree", "//pkg/sql/sqlinstance", "//pkg/sql/sqlliveness", diff --git a/pkg/sql/sqlinstance/instancestorage/row_codec.go b/pkg/sql/sqlinstance/instancestorage/row_codec.go index 2e3993dc5cbf..c71cf98cccbb 100644 --- a/pkg/sql/sqlinstance/instancestorage/row_codec.go +++ b/pkg/sql/sqlinstance/instancestorage/row_codec.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/valueside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -30,19 +30,18 @@ import ( // rowCodec encodes/decodes rows from the sql_instances table. 
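// The instance id is key-encoded in the table's primary index, while the
// addr and session_id columns are packed into a single column-family value;
// decodeRow below unpacks that value with a valueside.Decoder instead of the
// previous hand-rolled column-ID tag decoding.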
type rowCodec struct { - codec keys.SQLCodec - colIdxMap catalog.TableColMap - columns []catalog.Column + codec keys.SQLCodec + columns []catalog.Column + decoder valueside.Decoder } // MakeRowCodec makes a new rowCodec for the sql_instances table. func makeRowCodec(codec keys.SQLCodec) rowCodec { + columns := systemschema.SQLInstancesTable.PublicColumns() return rowCodec{ - codec: codec, - colIdxMap: row.ColIDtoRowIndexFromCols( - systemschema.SQLInstancesTable.PublicColumns(), - ), - columns: systemschema.SQLInstancesTable.PublicColumns(), + codec: codec, + columns: columns, + decoder: valueside.MakeDecoder(columns), } } @@ -56,14 +55,14 @@ func (d *rowCodec) encodeRow( ) (kv kv.KeyValue, err error) { addrDatum := tree.NewDString(addr) var valueBuf []byte - valueBuf, err = rowenc.EncodeTableValue( - []byte(nil), d.columns[1].GetID(), addrDatum, []byte(nil)) + valueBuf, err = valueside.Encode( + []byte(nil), valueside.MakeColumnIDDelta(0, d.columns[1].GetID()), addrDatum, []byte(nil)) if err != nil { return kv, err } sessionDatum := tree.NewDBytes(tree.DBytes(sessionID.UnsafeBytes())) - sessionColDiff := d.columns[2].GetID() - d.columns[1].GetID() - valueBuf, err = rowenc.EncodeTableValue(valueBuf, sessionColDiff, sessionDatum, []byte(nil)) + sessionColDiff := valueside.MakeColumnIDDelta(d.columns[1].GetID(), d.columns[2].GetID()) + valueBuf, err = valueside.Encode(valueBuf, sessionColDiff, sessionDatum, []byte(nil)) if err != nil { return kv, err } @@ -85,11 +84,10 @@ func (d *rowCodec) decodeRow( tombstone bool, _ error, ) { - tbl := systemschema.SQLInstancesTable - var alloc rowenc.DatumAlloc + var alloc tree.DatumAlloc // First, decode the id field from the index key. { - types := []*types.T{tbl.PublicColumns()[0].GetType()} + types := []*types.T{d.columns[0].GetType()} row := make([]rowenc.EncDatum, 1) _, _, _, err := rowenc.DecodeIndexKey(d.codec, types, row, nil, kv.Key) if err != nil { @@ -103,44 +101,25 @@ func (d *rowCodec) decodeRow( if !kv.Value.IsPresent() { return instanceID, "", "", hlc.Timestamp{}, true, nil } - // The rest of the columns are stored as a family, packed with diff-encoded - // column IDs followed by their values. - { - bytes, err := kv.Value.GetTuple() - timestamp = kv.Value.Timestamp - if err != nil { - return instanceID, "", "", hlc.Timestamp{}, false, err - } - var colIDDiff uint32 - var lastColID descpb.ColumnID - var res tree.Datum - for len(bytes) > 0 { - _, _, colIDDiff, _, err = encoding.DecodeValueTag(bytes) - if err != nil { - return instanceID, "", "", hlc.Timestamp{}, false, err - } - colID := lastColID + descpb.ColumnID(colIDDiff) - lastColID = colID - if idx, ok := d.colIdxMap.Get(colID); ok { - res, bytes, err = rowenc.DecodeTableValue(&alloc, tbl.PublicColumns()[idx].GetType(), bytes) - if err != nil { - return instanceID, "", "", hlc.Timestamp{}, false, err - } - switch colID { - case tbl.PublicColumns()[1].GetID(): // addr - if res != tree.DNull { - addr = string(tree.MustBeDString(res)) - } - case tbl.PublicColumns()[2].GetID(): // sessionID - if res != tree.DNull { - sessionID = sqlliveness.SessionID(tree.MustBeDBytes(res)) - } - default: - return instanceID, "", "", hlc.Timestamp{}, false, errors.Errorf("unknown column: %v", colID) - } - } - } + timestamp = kv.Value.Timestamp + // The rest of the columns are stored as a family. 
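+		// Within the tuple, column IDs are diff-encoded ahead of their values;
+		// the valueside.Decoder unpacks them into datums ordered by column position.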
+ bytes, err := kv.Value.GetTuple() + if err != nil { + return instanceID, "", "", hlc.Timestamp{}, false, err + } + + datums, err := d.decoder.Decode(&alloc, bytes) + if err != nil { + return instanceID, "", "", hlc.Timestamp{}, false, err + } + + if addrVal := datums[1]; addrVal != tree.DNull { + addr = string(tree.MustBeDString(addrVal)) + } + if sessionIDVal := datums[2]; sessionIDVal != tree.DNull { + sessionID = sqlliveness.SessionID(tree.MustBeDBytes(sessionIDVal)) } + return instanceID, addr, sessionID, timestamp, false, nil } diff --git a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel index d5eabe909dfd..9b7257c297fa 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel +++ b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel @@ -46,7 +46,7 @@ go_library( "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", "@com_github_gogo_protobuf//types", - "@com_github_gorhill_cronexpr//:cronexpr", + "@com_github_robfig_cron_v3//:cron", ], ) diff --git a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go index 044484ad2384..cf4ae0e3356e 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go +++ b/pkg/sql/sqlstats/persistedsqlstats/cluster_settings.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/errors" - "github.com/gorhill/cronexpr" + "github.com/robfig/cron/v3" ) // SQLStatsFlushInterval is the cluster setting that controls how often the SQL @@ -73,7 +73,7 @@ var SQLStatsCleanupRecurrence = settings.RegisterValidatedStringSetting( "cron-tab recurrence for SQL Stats cleanup job", "@hourly", /* defaultValue */ func(_ *settings.Values, s string) error { - if _, err := cronexpr.Parse(s); err != nil { + if _, err := cron.ParseStandard(s); err != nil { return errors.Wrap(err, "invalid cron expression") } return nil diff --git a/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go b/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go index 2d4497698fbf..f50be866709d 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go @@ -198,7 +198,9 @@ func TestSQLStatsScheduleOperations(t *testing.T) { t.Run("warn_schedule_long_run_interval", func(t *testing.T) { t.Run("via cluster setting", func(t *testing.T) { - helper.sqlDB.Exec(t, "SET CLUSTER SETTING sql.stats.cleanup.recurrence = '0 59 23 24 12 ? 2099'") + // Craft an expression that next repeats next month. + expr := fmt.Sprintf("59 23 24 %d ?", timeutil.Now().AddDate(0, 1, 0).Month()) + helper.sqlDB.Exec(t, "SET CLUSTER SETTING sql.stats.cleanup.recurrence = $1", expr) var err error testutils.SucceedsSoon(t, func() error { @@ -208,7 +210,7 @@ func TestSQLStatsScheduleOperations(t *testing.T) { if err == nil { return errors.Newf("retry: next_run=%s, schedule_expr=%s", sj.NextRun(), sj.ScheduleExpr()) } - require.Equal(t, "0 59 23 24 12 ? 
2099", sj.ScheduleExpr()) + require.Equal(t, expr, sj.ScheduleExpr()) return nil }) require.True(t, errors.Is( diff --git a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/BUILD.bazel b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/BUILD.bazel index ebcd42563330..8118b11235bf 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/BUILD.bazel +++ b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//pkg/util/encoding", "//pkg/util/json", "//pkg/util/timeutil", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], diff --git a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_impl.go b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_impl.go index 760044f44663..bddd7a8ce97c 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_impl.go +++ b/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil/json_impl.go @@ -14,7 +14,7 @@ import ( "encoding/hex" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/json" diff --git a/pkg/sql/stats/BUILD.bazel b/pkg/sql/stats/BUILD.bazel index bc37015648f5..2e373ec55e14 100644 --- a/pkg/sql/stats/BUILD.bazel +++ b/pkg/sql/stats/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", "//pkg/sql/sqlutil", diff --git a/pkg/sql/stats/histogram.go b/pkg/sql/stats/histogram.go index 18c543ac35cd..84156e98b444 100644 --- a/pkg/sql/stats/histogram.go +++ b/pkg/sql/stats/histogram.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -379,7 +379,7 @@ func (h histogram) toHistogramData(colType *types.T) (HistogramData, error) { } for i := range h.buckets { - encoded, err := rowenc.EncodeTableKey(nil, h.buckets[i].UpperBound, encoding.Ascending) + encoded, err := keyside.Encode(nil, h.buckets[i].UpperBound, encoding.Ascending) if err != nil { return HistogramData{}, err } diff --git a/pkg/sql/stats/json.go b/pkg/sql/stats/json.go index f4f1d6fa7145..a7fff246ff87 100644 --- a/pkg/sql/stats/json.go +++ b/pkg/sql/stats/json.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -64,7 +65,7 @@ func (js *JSONStatistic) SetHistogram(h *HistogramData) error { js.HistogramColumnType = typ.SQLString() js.HistogramBuckets = make([]JSONHistoBucket, len(h.Buckets)) js.HistogramVersion = h.Version - var a rowenc.DatumAlloc + var a tree.DatumAlloc for i := range h.Buckets { b := &h.Buckets[i] js.HistogramBuckets[i].NumEq = b.NumEq @@ -74,7 +75,7 @@ func (js *JSONStatistic) SetHistogram(h *HistogramData) error { if b.UpperBound == nil { return fmt.Errorf("histogram bucket upper bound is 
unset") } - datum, _, err := rowenc.DecodeTableKey(&a, typ, b.UpperBound, encoding.Ascending) + datum, _, err := keyside.Decode(&a, typ, b.UpperBound, encoding.Ascending) if err != nil { return err } @@ -152,7 +153,7 @@ func (js *JSONStatistic) GetHistogram( h.Buckets[i].NumEq = hb.NumEq h.Buckets[i].NumRange = hb.NumRange h.Buckets[i].DistinctRange = hb.DistinctRange - h.Buckets[i].UpperBound, err = rowenc.EncodeTableKey(nil, upperVal, encoding.Ascending) + h.Buckets[i].UpperBound, err = keyside.Encode(nil, upperVal, encoding.Ascending) if err != nil { return nil, err } diff --git a/pkg/sql/stats/row_sampling.go b/pkg/sql/stats/row_sampling.go index 50e9f1134466..b6fe80b32ee5 100644 --- a/pkg/sql/stats/row_sampling.go +++ b/pkg/sql/stats/row_sampling.go @@ -48,7 +48,7 @@ type SampledRow struct { type SampleReservoir struct { samples []SampledRow colTypes []*types.T - da rowenc.DatumAlloc + da tree.DatumAlloc ra rowenc.EncDatumRowAlloc memAcc *mon.BoundAccount diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index 988a49c6a1d8..4f05acaf510f 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -73,7 +74,7 @@ type TableStatisticsCache struct { collectionFactory *descs.CollectionFactory // Used when decoding KV from the range feed. - datumAlloc rowenc.DatumAlloc + datumAlloc tree.DatumAlloc } // The cache stores *cacheEntry objects. The fields are protected by the @@ -180,7 +181,7 @@ func NewTableStatisticsCache( // decodeTableStatisticsKV decodes the table ID from a range feed event on // system.table_statistics. func decodeTableStatisticsKV( - codec keys.SQLCodec, kv *roachpb.RangeFeedValue, da *rowenc.DatumAlloc, + codec keys.SQLCodec, kv *roachpb.RangeFeedValue, da *tree.DatumAlloc, ) (tableDesc descpb.ID, err error) { // The primary key of table_statistics is (tableID INT, statisticID INT). types := []*types.T{types.Int, types.Int} @@ -580,10 +581,10 @@ func (sc *TableStatisticsCache) parseStats( } // Decode the histogram data so that it's usable by the opt catalog. 
- var a rowenc.DatumAlloc + var a tree.DatumAlloc for i := offset; i < len(res.Histogram); i++ { bucket := &res.HistogramData.Buckets[i-offset] - datum, _, err := rowenc.DecodeTableKey(&a, res.HistogramData.ColumnType, bucket.UpperBound, encoding.Ascending) + datum, _, err := keyside.Decode(&a, res.HistogramData.ColumnType, bucket.UpperBound, encoding.Ascending) if err != nil { return nil, err } diff --git a/pkg/sql/stmtdiagnostics/BUILD.bazel b/pkg/sql/stmtdiagnostics/BUILD.bazel index 843c6e81aae3..4b90a14f8edb 100644 --- a/pkg/sql/stmtdiagnostics/BUILD.bazel +++ b/pkg/sql/stmtdiagnostics/BUILD.bazel @@ -35,6 +35,7 @@ go_test( "statement_diagnostics_test.go", ], embed = [":stmtdiagnostics"], + tags = ["no-remote"], deps = [ "//pkg/base", "//pkg/keys", diff --git a/pkg/sql/tablewriter_delete.go b/pkg/sql/tablewriter_delete.go index f789e21282e5..9bd80615197e 100644 --- a/pkg/sql/tablewriter_delete.go +++ b/pkg/sql/tablewriter_delete.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -29,7 +28,7 @@ type tableDeleter struct { tableWriterBase rd row.Deleter - alloc *rowenc.DatumAlloc + alloc *tree.DatumAlloc } var _ tableWriter = &tableDeleter{} diff --git a/pkg/sql/tests/BUILD.bazel b/pkg/sql/tests/BUILD.bazel index c87956e28551..3206e4e85e84 100644 --- a/pkg/sql/tests/BUILD.bazel +++ b/pkg/sql/tests/BUILD.bazel @@ -32,6 +32,7 @@ go_test( "enum_test.go", "hash_sharded_test.go", "impure_builtin_test.go", + "insert_fast_path_test.go", "inverted_index_test.go", "kv_test.go", "main_test.go", diff --git a/pkg/sql/tests/insert_fast_path_test.go b/pkg/sql/tests/insert_fast_path_test.go new file mode 100644 index 000000000000..c261a8495a44 --- /dev/null +++ b/pkg/sql/tests/insert_fast_path_test.go @@ -0,0 +1,73 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "context" + gosql "database/sql" + "testing" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/stretchr/testify/require" +) + +// TestInsertFastPathExtendedProtocol verifies that the 1PC "insert fast path" +// optimization is applied when doing a simple INSERT with a prepared statement. 
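+// The check is trace-based: the test enables session tracing, runs a
+// placeholder INSERT through the extended protocol, and then asserts that the
+// "autocommit enabled" event from the "batch flow coordinator" operation shows
+// up in [SHOW TRACE FOR SESSION].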
+func TestInsertFastPathExtendedProtocol(t *testing.T) { + defer leaktest.AfterTest(t)() + ctx := context.Background() + + var db *gosql.DB + + params, _ := CreateTestServerParams() + params.Settings = cluster.MakeTestingClusterSettings() + + tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: params}) + defer tc.Stopper().Stop(ctx) + db = tc.ServerConn(0) + _, err := db.Exec(`CREATE TABLE fast_path_test(val int);`) + require.NoError(t, err) + + conn, err := db.Conn(ctx) + require.NoError(t, err) + _, err = conn.ExecContext(ctx, "SET tracing = 'on'") + require.NoError(t, err) + // Use placeholders to force usage of extended protocol. + _, err = conn.ExecContext(ctx, "INSERT INTO fast_path_test VALUES($1)", 1) + require.NoError(t, err) + + fastPathEnabled := false + rows, err := conn.QueryContext(ctx, "SELECT message, operation FROM [SHOW TRACE FOR SESSION]") + require.NoError(t, err) + for rows.Next() { + var msg, operation string + err = rows.Scan(&msg, &operation) + require.NoError(t, err) + if msg == "autocommit enabled" && operation == "batch flow coordinator" { + fastPathEnabled = true + } + } + require.NoError(t, rows.Err()) + require.True(t, fastPathEnabled) + _, err = conn.ExecContext(ctx, "SET tracing = 'off'") + require.NoError(t, err) + err = conn.Close() + require.NoError(t, err) + + // Verify that the insert committed successfully. + var c int + err = db.QueryRow("SELECT count(*) FROM fast_path_test").Scan(&c) + require.NoError(t, err) + require.Equal(t, 1, c, "expected 1 row, got %d", c) +} diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index c9bf35ddd16a..d4f5da4aa083 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -191,21 +191,6 @@ func TestSystemTableLiterals(t *testing.T) { } require.NoError(t, catalog.ValidateSelf(gen)) - // TODO (Chengxiong) : remove this check after fixing #68031 - // These two system tables were created before we make shard column as - // virtual columns. We want to keep the hardcoded table descriptors to - // avoid system table migrations. However, in this test we run the `create - // table` statement and compare the result with the hardcoded descriptor, - // and there is discrepancy for sure. So we change the string statement to - // declare the shard column and constraint for it explicitly. The problem - // is that we only set `Hidden=true` when creating a shard column - // internally. User declared constraints has everything the same but with - // `Hidden=false`. So overriding the value here for now. Will remove it - // once we have better logic creating constraints. 
- if name == "statement_statistics" || name == "transaction_statistics" { - gen.TableDesc().Checks[0].Hidden = true - } - if test.pkg.TableDesc().Equal(gen.TableDesc()) { return } diff --git a/pkg/sql/tests/truncate_test.go b/pkg/sql/tests/truncate_test.go index e374f0399bc7..4fbb27685dbe 100644 --- a/pkg/sql/tests/truncate_test.go +++ b/pkg/sql/tests/truncate_test.go @@ -21,10 +21,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/stats" + "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" @@ -358,6 +361,8 @@ func TestTruncateWithConcurrentMutations(t *testing.T) { func TestTruncatePreservesSplitPoints(t *testing.T) { defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + skip.UnderRace(t) ctx := context.Background() @@ -386,6 +391,16 @@ func TestTruncatePreservesSplitPoints(t *testing.T) { }) defer tc.Stopper().Stop(ctx) + { + // This test asserts on KV-internal effects (i.e. range splits + // and their boundaries) as a result of configs and manually + // installed splits. To ensure it works with the span configs + // infrastructure quickly enough, we set a low closed timestamp + // target duration. + tdb := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) + } + var err error _, err = tc.Conns[0].ExecContext(ctx, ` CREATE TABLE a(a INT PRIMARY KEY, b INT, INDEX(b)); @@ -395,29 +410,44 @@ ALTER INDEX a_b_idx SPLIT AT VALUES(1000), (2000), (3000), (4000), (5000), (6000 `) assert.NoError(t, err) - row := tc.Conns[0].QueryRowContext(ctx, ` + const origNRanges = 19 + + // Range split decisions happen asynchronously, hence the + // succeeds-soon block here and below. + testutils.SucceedsSoon(t, func() error { + row := tc.Conns[0].QueryRowContext(ctx, ` SELECT count(*) FROM crdb_internal.ranges_no_leases WHERE table_id = 'a'::regclass`) - assert.NoError(t, row.Err()) - var nRanges int - assert.NoError(t, row.Scan(&nRanges)) + assert.NoError(t, row.Err()) - const origNRanges = 19 - assert.Equal(t, origNRanges, nRanges) + var nRanges int + assert.NoError(t, row.Scan(&nRanges)) + if nRanges != origNRanges { + return errors.Newf("expected %d ranges, found %d", origNRanges, nRanges) + } + return nil + }) _, err = tc.Conns[0].ExecContext(ctx, `TRUNCATE a`) assert.NoError(t, err) - row = tc.Conns[0].QueryRowContext(ctx, ` + // We subtract 1 from the original n ranges because the first range + // can't be migrated to the new keyspace, as its prefix doesn't + // include an index ID. 
+ expRanges := origNRanges + testCase.nodes*int(sql.PreservedSplitCountMultiple.Get( + &tc.Servers[0].Cfg.Settings.SV)) + + testutils.SucceedsSoon(t, func() error { + row := tc.Conns[0].QueryRowContext(ctx, ` SELECT count(*) FROM crdb_internal.ranges_no_leases WHERE table_id = 'a'::regclass`) - assert.NoError(t, row.Err()) - assert.NoError(t, row.Scan(&nRanges)) + assert.NoError(t, row.Err()) - // We subtract 1 from the original n ranges because the first range can't - // be migrated to the new keyspace, as its prefix doesn't include an - // index ID. - assert.Equal(t, origNRanges+testCase.nodes*int(sql.PreservedSplitCountMultiple.Get(&tc.Servers[0].Cfg. - Settings.SV)), - nRanges) + var nRanges int + assert.NoError(t, row.Scan(&nRanges)) + if nRanges != expRanges { + return errors.Newf("expected %d ranges, found %d", expRanges, nRanges) + } + return nil + }) }) } } diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index d33e9d0a7e9b..15393ba0b4e0 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/row" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" @@ -382,7 +381,7 @@ func ClearTableDataInChunks( ) error { const chunkSize = row.TableTruncateChunkSize var resume roachpb.Span - alloc := &rowenc.DatumAlloc{} + alloc := &tree.DatumAlloc{} for rowIdx, done := 0, false; !done; rowIdx += chunkSize { resumeAt := resume if traceKV { diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index b3f2b0a4b075..eafa08333789 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" @@ -957,7 +958,7 @@ func (t *typeSchemaChanger) canRemoveEnumValue( if err != nil { return err } - if descpb.RegionName(member.LogicalRepresentation) == homedRegion { + if catpb.RegionName(member.LogicalRepresentation) == homedRegion { return errors.Newf("could not remove enum value %q as it is the home region for table %q", member.LogicalRepresentation, desc.GetName()) } @@ -1073,7 +1074,7 @@ func findUsageOfEnumValueInEncodedPartitioningValue( foundUsage bool, member *descpb.TypeDescriptor_EnumMember, ) (bool, error) { - var d rowenc.DatumAlloc + var d tree.DatumAlloc tuple, _, err := rowenc.DecodePartitionTuple( &d, codec, table, index, partitioning, v, fakePrefixDatums, ) diff --git a/pkg/sql/unsupported_vars.go b/pkg/sql/unsupported_vars.go index 881541d69208..ef35e072e939 100644 --- a/pkg/sql/unsupported_vars.go +++ b/pkg/sql/unsupported_vars.go @@ -86,7 +86,7 @@ var UnsupportedVars = func(ss ...string) map[string]struct{} { "default_transaction_deferrable", // "default_transaction_isolation", // "default_transaction_read_only", - "default_with_oids", + // "default_with_oids", "dynamic_library_path", "effective_cache_size", "enable_bitmapscan", @@ -203,6 +203,6 @@ var UnsupportedVars = func(ss ...string) 
map[string]struct{} { "wal_debug", "work_mem", "xmlbinary", - "xmloption", + // "xmloption", "zero_damaged_pages", ) diff --git a/pkg/sql/update.go b/pkg/sql/update.go index 2ff8d0164723..44a728e48c09 100644 --- a/pkg/sql/update.go +++ b/pkg/sql/update.go @@ -321,10 +321,9 @@ func (u *updateNode) processSourceRow(params runParams, sourceVals tree.Datums) // If result rows need to be accumulated, do it. if u.run.tu.rows != nil { - // The new values can include all columns, the construction of the - // values has used execinfra.ScanVisibilityPublicAndNotPublic so the - // values may contain additional columns for every newly added column - // not yet visible. We do not want them to be available for RETURNING. + // The new values can include all columns, so the values may contain + // additional columns for every newly added column not yet visible. We do + // not want them to be available for RETURNING. // // MakeUpdater guarantees that the first columns of the new values // are those specified u.columns. diff --git a/pkg/sql/values_test.go b/pkg/sql/values_test.go index 557ca98df380..5f615374818e 100644 --- a/pkg/sql/values_test.go +++ b/pkg/sql/values_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/types" diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go index ce261ceff3da..6cff08017597 100644 --- a/pkg/sql/vars.go +++ b/pkg/sql/vars.go @@ -1039,6 +1039,12 @@ var varGen = map[string]sessionVar{ // be changed to `on`. `backslash_quote`: makeCompatStringVar(`backslash_quote`, `safe_encoding`), + // See https://www.postgresql.org/docs/9.5/runtime-config-compatible.html + `default_with_oids`: makeCompatBoolVar(`default_with_oids`, false, false), + + // See https://www.postgresql.org/docs/current/datatype-xml.html. + `xmloption`: makeCompatStringVar(`xmloption`, `content`), + // Supported for PG compatibility only. // See https://www.postgresql.org/docs/10/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS `max_identifier_length`: { diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index 1a58ea8c423f..85f6118c5488 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -277,10 +277,13 @@ func GetHydratedZoneConfigForNamedZone( zoneID, zone, _, _, err := getZoneConfig( codec, descpb.ID(id), getKey, false /* getInheritedDefault */, false, /* mayBeTable */ ) + if err != nil { + return nil, err + } if err := completeZoneConfig(zone, codec, zoneID, getKey); err != nil { return nil, err } - return zone, err + return zone, nil } // GetHydratedZoneConfigForTable returns a fully hydrated zone config for a diff --git a/pkg/storage/metamorphic/operands.go b/pkg/storage/metamorphic/operands.go index 11351a93aee3..d106b49ff342 100644 --- a/pkg/storage/metamorphic/operands.go +++ b/pkg/storage/metamorphic/operands.go @@ -345,6 +345,31 @@ func (t *txnGenerator) forEachConflict( } } +// truncateSpanForConflicts truncates [key, endKey) to a sub-span that does +// not conflict with any in-flight writes. If no such span is found, an empty +// span (i.e. key >= endKey) is returned. Callers are expected to handle that +// case gracefully. +func (t *txnGenerator) truncateSpanForConflicts( + w readWriterID, txn txnID, key, endKey roachpb.Key, +) roachpb.Span { + // forEachConflict is guaranteed to iterate over conflicts in key order, + // with the lowest conflicting key first. 
Find the first conflict and + // truncate the span to that range. + t.forEachConflict(w, txn, key, endKey, func(conflict roachpb.Span) bool { + if conflict.ContainsKey(key) { + key = append([]byte(nil), conflict.EndKey...) + return true + } + endKey = conflict.Key + return false + }) + result := roachpb.Span{ + Key: key, + EndKey: endKey, + } + return result +} + func (t *txnGenerator) addWrittenKeySpan( w readWriterID, txn txnID, key roachpb.Key, endKey roachpb.Key, ) { @@ -421,6 +446,10 @@ func (t *txnGenerator) addWrittenKeySpan( } func (t *txnGenerator) trackTransactionalWrite(w readWriterID, txn txnID, key, endKey roachpb.Key) { + if len(endKey) > 0 && key.Compare(endKey) >= 0 { + // No-op. + return + } t.addWrittenKeySpan(w, txn, key, endKey) if w == "engine" { return diff --git a/pkg/storage/metamorphic/operations.go b/pkg/storage/metamorphic/operations.go index 6ec7d53e05c3..21dc31404d11 100644 --- a/pkg/storage/metamorphic/operations.go +++ b/pkg/storage/metamorphic/operations.go @@ -271,6 +271,11 @@ type mvccDeleteRangeOp struct { func (m mvccDeleteRangeOp) run(ctx context.Context) string { txn := m.m.getTxn(m.txn) writer := m.m.getReadWriter(m.writer) + if m.key.Compare(m.endKey) >= 0 { + // Empty range. No-op. + return "no-op due to no non-conflicting key range" + } + txn.Sequence++ keys, _, _, err := storage.MVCCDeleteRange(ctx, writer, nil, m.key, m.endKey, 0, txn.WriteTimestamp, txn, true) @@ -295,7 +300,6 @@ func (m mvccDeleteRangeOp) run(ctx context.Context) string { type mvccClearTimeRangeOp struct { m *metaTestRunner - writer readWriterID key roachpb.Key endKey roachpb.Key startTime hlc.Timestamp @@ -303,14 +307,16 @@ type mvccClearTimeRangeOp struct { } func (m mvccClearTimeRangeOp) run(ctx context.Context) string { - writer := m.m.getReadWriter(m.writer) - useTBI := m.writer == "engine" - span, err := storage.MVCCClearTimeRange(ctx, writer, &enginepb.MVCCStats{}, m.key, m.endKey, - m.startTime, m.endTime, math.MaxInt64, math.MaxInt64, useTBI) + if m.key.Compare(m.endKey) >= 0 { + // Empty range. No-op. + return "no-op due to no non-conflicting key range" + } + span, err := storage.MVCCClearTimeRange(ctx, m.m.engine, &enginepb.MVCCStats{}, m.key, m.endKey, + m.startTime, m.endTime, math.MaxInt64, math.MaxInt64, true /* useTBI */) if err != nil { return fmt.Sprintf("error: %s", err) } - return fmt.Sprintf("ok, span = %v", span) + return fmt.Sprintf("ok, deleted span = %s - %s, resumeSpan = %v", m.key, m.endKey, span) } type mvccDeleteOp struct { @@ -678,11 +684,15 @@ type clearRangeOp struct { func (c clearRangeOp) run(ctx context.Context) string { // ClearRange calls in Cockroach usually happen with boundaries demarcated // using unversioned keys, so mimic the same behavior here. + if c.key.Compare(c.endKey) >= 0 { + // Empty range. No-op. 
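For context, the truncation logic added above can be modeled in isolation. The following is a minimal, self-contained sketch using plain byte-slice keys instead of the real `roachpb.Span`/metamorphic types; the `span` type, the `truncateForConflicts` helper, and the example conflict set are illustrative only. As in the guards above, callers are expected to treat an empty result (key >= endKey) as a no-op:

```
package main

import (
	"bytes"
	"fmt"
)

// span is a simplified stand-in for roachpb.Span: a half-open [Key, EndKey) range.
type span struct {
	Key, EndKey []byte
}

func (s span) containsKey(k []byte) bool {
	return bytes.Compare(k, s.Key) >= 0 && bytes.Compare(k, s.EndKey) < 0
}

// truncateForConflicts visits conflicts overlapping [key, endKey), assumed
// sorted by Key: a conflict that covers the start key bumps the start past its
// end; the first conflict that does not clamps the end to the conflict's start.
// The result may be empty (Key >= EndKey), which callers must treat as a no-op.
func truncateForConflicts(key, endKey []byte, sortedConflicts []span) span {
	for _, c := range sortedConflicts {
		if c.containsKey(key) {
			key = append([]byte(nil), c.EndKey...)
			continue
		}
		endKey = c.Key
		break
	}
	return span{Key: key, EndKey: endKey}
}

func main() {
	conflicts := []span{{Key: []byte("b"), EndKey: []byte("d")}}
	got := truncateForConflicts([]byte("a"), []byte("f"), conflicts)
	if bytes.Compare(got.Key, got.EndKey) >= 0 {
		fmt.Println("no-op: no non-conflicting sub-span")
		return
	}
	fmt.Printf("truncated span: [%s, %s)\n", got.Key, got.EndKey) // [a, b)
}
```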
+ return "no-op due to no non-conflicting key range" + } err := c.m.engine.ClearMVCCRangeAndIntents(c.key, c.endKey) if err != nil { return fmt.Sprintf("error: %s", err.Error()) } - return "ok" + return fmt.Sprintf("deleted range = %s - %s", c.key, c.endKey) } type compactOp struct { @@ -887,11 +897,9 @@ var opGenerators = []opGenerator{ if endKey.Compare(key) < 0 { key, endKey = endKey, key } - // forEachConflict is guaranteed to iterate - m.txnGenerator.forEachConflict(writer, txn, key, endKey, func(conflict roachpb.Span) bool { - endKey = conflict.Key - return false - }) + truncatedSpan := m.txnGenerator.truncateSpanForConflicts(writer, txn, key, endKey) + key = truncatedSpan.Key + endKey = truncatedSpan.EndKey // Track this write in the txn generator. This ensures the batch will be // committed before the transaction is committed @@ -918,15 +926,17 @@ var opGenerators = []opGenerator{ { name: "mvcc_clear_time_range", generate: func(ctx context.Context, m *metaTestRunner, args ...string) mvccOp { - writer := readWriterID(args[0]) - key := m.keyGenerator.parse(args[1]).Key - endKey := m.keyGenerator.parse(args[2]).Key - startTime := m.pastTSGenerator.parse(args[3]) + key := m.keyGenerator.parse(args[0]).Key + endKey := m.keyGenerator.parse(args[1]).Key + startTime := m.pastTSGenerator.parse(args[2]) endTime := m.pastTSGenerator.parse(args[3]) if endKey.Compare(key) < 0 { key, endKey = endKey, key } + truncatedSpan := m.txnGenerator.truncateSpanForConflicts("engine", "", key, endKey) + key = truncatedSpan.Key + endKey = truncatedSpan.EndKey if endTime.Less(startTime) { startTime, endTime = endTime, startTime } @@ -937,18 +947,13 @@ var opGenerators = []opGenerator{ endTime = endTime.Next() return &mvccClearTimeRangeOp{ m: m, - writer: writer, key: key, endKey: endKey, startTime: startTime, endTime: endTime, } }, - dependentOps: func(m *metaTestRunner, args ...string) []opReference { - return closeItersOnBatch(m, readWriterID(args[0])) - }, operands: []operandType{ - operandReadWriter, operandMVCCKey, operandMVCCKey, operandPastTS, @@ -1337,6 +1342,10 @@ var opGenerators = []opGenerator{ endKey = endKey.Next() } + truncatedSpan := m.txnGenerator.truncateSpanForConflicts("engine", "", key, endKey) + key = truncatedSpan.Key + endKey = truncatedSpan.EndKey + return &clearRangeOp{ m: m, key: key, diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index dc608b90f0b2..2aa19c7e04e0 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -1640,7 +1640,7 @@ func mvccPutInternal( // instead of allowing their transactions to continue and be retried // before committing. writeTimestamp.Forward(metaTimestamp.Next()) - maybeTooOldErr = roachpb.NewWriteTooOldError(readTimestamp, writeTimestamp) + maybeTooOldErr = roachpb.NewWriteTooOldError(readTimestamp, writeTimestamp, key) // If we're in a transaction, always get the value at the orig // timestamp. Outside of a transaction, the read timestamp advances // to the the latest value's timestamp + 1 as well. 
The new diff --git a/pkg/storage/mvcc_test.go b/pkg/storage/mvcc_test.go index af67ce017e6a..7159fd653478 100644 --- a/pkg/storage/mvcc_test.go +++ b/pkg/storage/mvcc_test.go @@ -427,7 +427,7 @@ func TestMVCCWriteWithOlderTimestampAfterDeletionOfNonexistentKey(t *testing.T) if err := MVCCPut( context.Background(), engine, nil, testKey1, hlc.Timestamp{WallTime: 1}, value1, nil, ); !testutils.IsError( - err, "write at timestamp 0.000000001,0 too old; wrote at 0.000000003,1", + err, "write for key \"/db1\" at timestamp 0.000000001,0 too old; wrote at 0.000000003,1", ) { t.Fatal(err) } diff --git a/pkg/storage/pebble_mvcc_scanner.go b/pkg/storage/pebble_mvcc_scanner.go index 925dc6d847f1..243526d186fa 100644 --- a/pkg/storage/pebble_mvcc_scanner.go +++ b/pkg/storage/pebble_mvcc_scanner.go @@ -174,8 +174,10 @@ type pebbleMVCCScanner struct { // mostRecentTS stores the largest timestamp observed that is equal to or // above the scan timestamp. Only applicable if failOnMoreRecent is true. If // set and no other error is hit, a WriteToOld error will be returned from - // the scan. - mostRecentTS hlc.Timestamp + // the scan. mostRecentKey is one of the keys (not necessarily at + // mostRecentTS) that was more recent than the scan. + mostRecentTS hlc.Timestamp + mostRecentKey roachpb.Key // Stores any error returned. If non-nil, iteration short circuits. err error // Number of iterations to try before we do a Seek/SeekReverse. Stays within @@ -338,7 +340,7 @@ func (p *pebbleMVCCScanner) maybeFailOnMoreRecent() { } // The txn can't write at the existing timestamp, so we provide the error // with the timestamp immediately after it. - p.err = roachpb.NewWriteTooOldError(p.ts, p.mostRecentTS.Next()) + p.err = roachpb.NewWriteTooOldError(p.ts, p.mostRecentTS.Next(), p.mostRecentKey) p.results.clear() p.intents.Reset() } @@ -373,6 +375,9 @@ func (p *pebbleMVCCScanner) getAndAdvance(ctx context.Context) bool { // seen so we know to return an error, but then keep scanning so // that we can return the largest possible time. p.mostRecentTS.Forward(p.curUnsafeKey.Timestamp) + if len(p.mostRecentKey) == 0 { + p.mostRecentKey = append(p.mostRecentKey, p.curUnsafeKey.Key...) + } return p.advanceKey() } @@ -390,6 +395,9 @@ func (p *pebbleMVCCScanner) getAndAdvance(ctx context.Context) bool { // seen so we know to return an error, but then keep scanning so // that we can return the largest possible time. p.mostRecentTS.Forward(p.curUnsafeKey.Timestamp) + if len(p.mostRecentKey) == 0 { + p.mostRecentKey = append(p.mostRecentKey, p.curUnsafeKey.Key...) + } return p.advanceKey() } diff --git a/pkg/storage/sst.go b/pkg/storage/sst.go index 94de3de4f705..893812bd63be 100644 --- a/pkg/storage/sst.go +++ b/pkg/storage/sst.go @@ -54,19 +54,9 @@ func CheckSSTConflicts( var statsDiff enginepb.MVCCStats var intents []roachpb.Intent - // Fast path: there are no keys in the reader between the sstable's start and - // end keys. We use a non-prefix iterator for this search, and reopen a prefix - // one if there are engine keys in the span. 
- nonPrefixIter := reader.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: end.Key}) - nonPrefixIter.SeekGE(start) - valid, _ := nonPrefixIter.Valid() - nonPrefixIter.Close() - if !valid { - return statsDiff, nil - } - - extIter := reader.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: end.Key, Prefix: true}) + extIter := reader.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: end.Key}) defer extIter.Close() + extIter.SeekGE(start) sstIter, err := NewMemSSTIterator(sst, false) if err != nil { @@ -75,50 +65,24 @@ func CheckSSTConflicts( defer sstIter.Close() sstIter.SeekGE(start) - // extIter is a prefix iterator; it is expected to skip keys that belong - // to different prefixes. Only iterate along the sst iterator, and re-seek - // extIter each time. - sstOK, sstErr := sstIter.Valid() - if sstOK { - extIter.SeekGE(MVCCKey{Key: sstIter.UnsafeKey().Key}) - } extOK, extErr := extIter.Valid() - for sstErr == nil && sstOK { + sstOK, sstErr := sstIter.Valid() + for extErr == nil && sstErr == nil && extOK && sstOK { if err := ctx.Err(); err != nil { return enginepb.MVCCStats{}, err } - if !extOK { - // There is no key in extIter matching this prefix. Check the next - // key in sstIter. Note that we can't just use an exhausted extIter - // as a sign that we are done; extIter could be skipping keys, so it - // must be re-seeked. - sstIter.NextKey() - sstOK, sstErr = sstIter.Valid() - if sstOK { - extIter.SeekGE(MVCCKey{Key: sstIter.UnsafeKey().Key}) - } - extOK, extErr = extIter.Valid() - continue - } extKey, extValue := extIter.UnsafeKey(), extIter.UnsafeValue() sstKey, sstValue := sstIter.UnsafeKey(), sstIter.UnsafeValue() // Keep seeking the iterators until both keys are equal. if cmp := bytes.Compare(extKey.Key, sstKey.Key); cmp < 0 { - // sstIter is further ahead. Seek extIter. extIter.SeekGE(MVCCKey{Key: sstKey.Key}) extOK, extErr = extIter.Valid() continue } else if cmp > 0 { - // extIter is further ahead. But it could have skipped keys in between, - // so re-seek it at the next sst key. - sstIter.NextKey() + sstIter.SeekGE(MVCCKey{Key: extKey.Key}) sstOK, sstErr = sstIter.Valid() - if sstOK { - extIter.SeekGE(MVCCKey{Key: sstIter.UnsafeKey().Key}) - } - extOK, extErr = extIter.Valid() continue } @@ -221,7 +185,7 @@ func CheckSSTConflicts( // be used in transactions so we don't need to check. if sstKey.Timestamp.LessEq(extKey.Timestamp) { return enginepb.MVCCStats{}, roachpb.NewWriteTooOldError( - sstKey.Timestamp, extKey.Timestamp.Next()) + sstKey.Timestamp, extKey.Timestamp.Next(), sstKey.Key) } // If we are shadowing an existing key, we must update the stats accordingly @@ -242,12 +206,12 @@ func CheckSSTConflicts( extOK, extErr = extIter.Valid() } - if sstErr != nil { - return enginepb.MVCCStats{}, sstErr - } if extErr != nil { return enginepb.MVCCStats{}, extErr } + if sstErr != nil { + return enginepb.MVCCStats{}, sstErr + } if len(intents) > 0 { return enginepb.MVCCStats{}, &roachpb.WriteIntentError{Intents: intents} } diff --git a/pkg/storage/sst_iterator.go b/pkg/storage/sst_iterator.go index 449a0426896f..d3e52851e8ce 100644 --- a/pkg/storage/sst_iterator.go +++ b/pkg/storage/sst_iterator.go @@ -33,6 +33,10 @@ type sstIterator struct { // roachpb.Verify k/v pairs on each call to Next. verify bool + + // For determining whether to trySeekUsingNext=true in SeekGE. 
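The fields added to `sstIterator` below exist to decide when `SeekGE` can pass `trySeekUsingNext=true` to the underlying sstable iterator: the previous operation must also have been a `SeekGE`, and the new seek key must not be less than the previously remembered one. A hedged, standalone sketch of that bookkeeping follows (string keys and the `seekState` type are simplifications; the real iterator remembers the `MVCCKey` it actually landed on):

```
package main

import "fmt"

// seekState is a simplified model of the new sstIterator bookkeeping: remember
// the last seek key and whether the previous operation was a SeekGE, since a
// Next/NextKey in between invalidates the forward-only assumption.
type seekState struct {
	prevSeekKey  string
	seekGELastOp bool
}

// seekGE reports whether trySeekUsingNext may be passed for a seek to key, and
// records state for the next call.
func (s *seekState) seekGE(key string) (trySeekUsingNext bool) {
	trySeekUsingNext = s.seekGELastOp && key >= s.prevSeekKey
	s.prevSeekKey = key
	s.seekGELastOp = true
	return trySeekUsingNext
}

// step models Next/NextKey, which reset the flag.
func (s *seekState) step() { s.seekGELastOp = false }

func main() {
	var s seekState
	fmt.Println(s.seekGE("a")) // false: first seek
	fmt.Println(s.seekGE("c")) // true: seeks are non-decreasing
	s.step()
	fmt.Println(s.seekGE("d")) // false: a Next/NextKey intervened
}
```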
+ prevSeekKey MVCCKey + seekGELastOp bool } // NewSSTIterator returns a `SimpleMVCCIterator` for the provided file, which it @@ -86,7 +90,15 @@ func (r *sstIterator) SeekGE(key MVCCKey) { } r.keyBuf = EncodeKeyToBuf(r.keyBuf, key) var iKey *sstable.InternalKey - iKey, r.value = r.iter.SeekGE(r.keyBuf) + trySeekUsingNext := false + if r.seekGELastOp { + // trySeekUsingNext = r.prevSeekKey <= key + trySeekUsingNext = !key.Less(r.prevSeekKey) + } + // NB: seekGELastOp may still be true, and we haven't updated prevSeekKey. + // So be careful not to return before the end of the function that sets these + // fields up for the next SeekGE. + iKey, r.value = r.iter.SeekGE(r.keyBuf, trySeekUsingNext) if iKey != nil { r.iterValid = true r.mvccKey, r.err = DecodeMVCCKey(iKey.UserKey) @@ -97,6 +109,9 @@ func (r *sstIterator) SeekGE(key MVCCKey) { if r.iterValid && r.err == nil && r.verify && r.mvccKey.IsValue() { r.err = roachpb.Value{RawBytes: r.value}.Verify(r.mvccKey.Key) } + r.prevSeekKey.Key = append(r.prevSeekKey.Key[:0], r.mvccKey.Key...) + r.prevSeekKey.Timestamp = r.mvccKey.Timestamp + r.seekGELastOp = true } // Valid implements the SimpleMVCCIterator interface. @@ -106,6 +121,7 @@ func (r *sstIterator) Valid() (bool, error) { // Next implements the SimpleMVCCIterator interface. func (r *sstIterator) Next() { + r.seekGELastOp = false if !r.iterValid || r.err != nil { return } @@ -124,6 +140,7 @@ func (r *sstIterator) Next() { // NextKey implements the SimpleMVCCIterator interface. func (r *sstIterator) NextKey() { + r.seekGELastOp = false if !r.iterValid || r.err != nil { return } diff --git a/pkg/storage/testdata/mvcc_histories/conditional_put_with_txn_enable_separated b/pkg/storage/testdata/mvcc_histories/conditional_put_with_txn_enable_separated index a6ffa7822547..065a36bdebbe 100644 --- a/pkg/storage/testdata/mvcc_histories/conditional_put_with_txn_enable_separated +++ b/pkg/storage/testdata/mvcc_histories/conditional_put_with_txn_enable_separated @@ -62,7 +62,7 @@ cput k=k v=v4 cond=v3 ts=123 >> at end: data: "k"/124.000000000,1 -> /BYTES/v4 data: "k"/124.000000000,0 -> /BYTES/v3 -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 123.000000000,0 too old; wrote at 124.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k" at timestamp 123.000000000,0 too old; wrote at 124.000000000,1 # Reset for next test diff --git a/pkg/storage/testdata/mvcc_histories/conditional_put_write_too_old b/pkg/storage/testdata/mvcc_histories/conditional_put_write_too_old index 54c96f957097..64956d46f179 100644 --- a/pkg/storage/testdata/mvcc_histories/conditional_put_write_too_old +++ b/pkg/storage/testdata/mvcc_histories/conditional_put_write_too_old @@ -25,7 +25,7 @@ cput ts=1 k=k v=v2 cond=v1 >> at end: data: "k"/10.000000000,1 -> /BYTES/v2 data: "k"/10.000000000,0 -> /BYTES/v1 -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 1.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k" at timestamp 1.000000000,0 too old; wrote at 10.000000000,1 # Try a transactional put @t=1 with expectation of value2; should fail. 
run error @@ -49,4 +49,4 @@ meta: "k"/0,0 -> txn={id=00000000 key=/Min pri=0.00000000 epo=0 ts=10.000000000, data: "k"/10.000000000,2 -> /BYTES/v3 data: "k"/10.000000000,1 -> /BYTES/v2 data: "k"/10.000000000,0 -> /BYTES/v1 -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 1.000000000,0 too old; wrote at 10.000000000,2 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k" at timestamp 1.000000000,0 too old; wrote at 10.000000000,2 diff --git a/pkg/storage/testdata/mvcc_histories/delete_range b/pkg/storage/testdata/mvcc_histories/delete_range index 2376bdee4699..1fa89908c08c 100644 --- a/pkg/storage/testdata/mvcc_histories/delete_range +++ b/pkg/storage/testdata/mvcc_histories/delete_range @@ -128,7 +128,7 @@ data: "c/123"/47.000000000,0 -> / data: "c/123"/44.000000000,0 -> /BYTES/abc data: "d"/44.000000000,0 -> /BYTES/abc data: "d/123"/44.000000000,0 -> /BYTES/abc -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 46.000000000,0 too old; wrote at 47.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "c" at timestamp 46.000000000,0 too old; wrote at 47.000000000,1 run ok txn_remove t=A diff --git a/pkg/storage/testdata/mvcc_histories/increment b/pkg/storage/testdata/mvcc_histories/increment index d26741ea724b..1d0f40b75832 100644 --- a/pkg/storage/testdata/mvcc_histories/increment +++ b/pkg/storage/testdata/mvcc_histories/increment @@ -63,7 +63,7 @@ data: "k"/0,1 -> /INT/2 data: "r"/3.000000000,1 -> /INT/3 data: "r"/3.000000000,0 -> /INT/2 data: "r"/1.000000000,0 -> /INT/1 -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 2.000000000,0 too old; wrote at 3.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "r" at timestamp 2.000000000,0 too old; wrote at 3.000000000,1 # Ditto with transactional. 
run error @@ -80,4 +80,4 @@ data: "r"/3.000000000,2 -> /INT/2 data: "r"/3.000000000,1 -> /INT/3 data: "r"/3.000000000,0 -> /INT/2 data: "r"/1.000000000,0 -> /INT/1 -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 2.000000000,0 too old; wrote at 3.000000000,2 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "r" at timestamp 2.000000000,0 too old; wrote at 3.000000000,2 diff --git a/pkg/storage/testdata/mvcc_histories/read_fail_on_more_recent b/pkg/storage/testdata/mvcc_histories/read_fail_on_more_recent index 802e3d91bcd7..f1286da73ab4 100644 --- a/pkg/storage/testdata/mvcc_histories/read_fail_on_more_recent +++ b/pkg/storage/testdata/mvcc_histories/read_fail_on_more_recent @@ -37,7 +37,7 @@ run error get k=k1 ts=9,0 failOnMoreRecent ---- get: "k1" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 run ok get k=k1 ts=10,0 @@ -48,7 +48,7 @@ run error get k=k1 ts=10,0 failOnMoreRecent ---- get: "k1" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 run ok get k=k1 ts=11,0 @@ -69,7 +69,7 @@ run error scan k=k1 end=k2 ts=9,0 failOnMoreRecent ---- scan: "k1"-"k2" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 run ok scan k=k1 end=k2 ts=10,0 @@ -80,7 +80,7 @@ run error scan k=k1 end=k2 ts=10,0 failOnMoreRecent ---- scan: "k1"-"k2" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 run ok scan k=k1 end=k2 ts=11,0 @@ -180,7 +180,7 @@ run error scan k=k1 end=k3 ts=9,0 failOnMoreRecent ---- scan: "k1"-"k3" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 9.000000000,0 too old; wrote at 10.000000000,1 run error scan k=k1 end=k3 ts=10,0 @@ -192,7 +192,7 @@ run error scan k=k1 end=k3 ts=10,0 failOnMoreRecent ---- scan: "k1"-"k3" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k1" at timestamp 10.000000000,0 too old; wrote at 10.000000000,1 run error scan k=k1 end=k3 ts=11,0 @@ -241,16 +241,16 @@ run error scan k=a end=b_next ts=9,0 failOnMoreRecent ---- scan: "a"-"b_next" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 run error scan k=a end=c_next ts=9,0 failOnMoreRecent ---- scan: "a"-"c_next" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 +error: 
(*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 run error scan k=b end=c_next ts=9,0 failOnMoreRecent ---- scan: "b"-"c_next" -> -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "b" at timestamp 9.000000000,0 too old; wrote at 13.000000000,1 diff --git a/pkg/storage/testdata/mvcc_histories/update_existing_key_old_version b/pkg/storage/testdata/mvcc_histories/update_existing_key_old_version index 9b5084f5b605..78da368792e8 100644 --- a/pkg/storage/testdata/mvcc_histories/update_existing_key_old_version +++ b/pkg/storage/testdata/mvcc_histories/update_existing_key_old_version @@ -14,7 +14,7 @@ put k=k v=v2 ts=0,1 >> at end: data: "k"/1.000000000,2 -> /BYTES/v2 data: "k"/1.000000000,1 -> /BYTES/v -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 0,1 too old; wrote at 1.000000000,2 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k" at timestamp 0,1 too old; wrote at 1.000000000,2 # Earlier logical time. @@ -25,4 +25,4 @@ put k=k v=v2 ts=1,0 data: "k"/1.000000000,3 -> /BYTES/v2 data: "k"/1.000000000,2 -> /BYTES/v2 data: "k"/1.000000000,1 -> /BYTES/v -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 1.000000000,0 too old; wrote at 1.000000000,3 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "k" at timestamp 1.000000000,0 too old; wrote at 1.000000000,3 diff --git a/pkg/storage/testdata/mvcc_histories/write_too_old b/pkg/storage/testdata/mvcc_histories/write_too_old index 072d405f6885..6ad2e566d5a9 100644 --- a/pkg/storage/testdata/mvcc_histories/write_too_old +++ b/pkg/storage/testdata/mvcc_histories/write_too_old @@ -24,7 +24,7 @@ txn: "A" meta={id=00000000 key=/Min pri=0.00000000 epo=0 ts=33.000000000,0 min=0 meta: "a"/0,0 -> txn={id=00000000 key=/Min pri=0.00000000 epo=0 ts=44.000000000,1 min=0,0 seq=0} ts=44.000000000,1 del=true klen=12 vlen=0 mergeTs= txnDidNotUpdateMeta=true data: "a"/44.000000000,1 -> / data: "a"/44.000000000,0 -> /BYTES/abc -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 33.000000000,0 too old; wrote at 44.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 33.000000000,0 too old; wrote at 44.000000000,1 run ok resolve_intent t=A k=a status=ABORTED @@ -47,7 +47,7 @@ txn: "B" meta={id=00000000 key=/Min pri=0.00000000 epo=0 ts=33.000000000,0 min=0 meta: "a"/0,0 -> txn={id=00000000 key=/Min pri=0.00000000 epo=0 ts=44.000000000,1 min=0,0 seq=0} ts=44.000000000,1 del=false klen=12 vlen=8 mergeTs= txnDidNotUpdateMeta=true data: "a"/44.000000000,1 -> /BYTES/def data: "a"/44.000000000,0 -> /BYTES/abc -error: (*roachpb.WriteTooOldError:) WriteTooOldError: write at timestamp 33.000000000,0 too old; wrote at 44.000000000,1 +error: (*roachpb.WriteTooOldError:) WriteTooOldError: write for key "a" at timestamp 33.000000000,0 too old; wrote at 44.000000000,1 run ok resolve_intent t=B k=a status=ABORTED diff --git a/pkg/streaming/BUILD.bazel b/pkg/streaming/BUILD.bazel index 9ec81daf9740..eabc45dd49e8 100644 --- a/pkg/streaming/BUILD.bazel +++ b/pkg/streaming/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/streaming", visibility = ["//visibility:public"], deps = [ - "//pkg/jobs/jobspb", + "//pkg/ccl/streamingccl/streampb", "//pkg/kv", "//pkg/sql/sem/tree", 
"//pkg/util/hlc", diff --git a/pkg/streaming/api.go b/pkg/streaming/api.go index 19579d92a12a..a920e897e0e7 100644 --- a/pkg/streaming/api.go +++ b/pkg/streaming/api.go @@ -11,7 +11,7 @@ package streaming import ( - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streampb" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -53,8 +53,7 @@ type ReplicationStreamManager interface { evalCtx *tree.EvalContext, streamID StreamID, frontier hlc.Timestamp, - txn *kv.Txn, - ) (jobspb.StreamReplicationStatus, error) + txn *kv.Txn) (streampb.StreamReplicationStatus, error) // StreamPartition starts streaming replication for the partition specified by opaqueSpec // which contains serialized streampb.StreamPartitionSpec protocol message. @@ -63,6 +62,13 @@ type ReplicationStreamManager interface { streamID StreamID, opaqueSpec []byte, ) (tree.ValueGenerator, error) + + // GetReplicationStreamSpec gets a stream replication spec. + GetReplicationStreamSpec( + evalCtx *tree.EvalContext, + txn *kv.Txn, + streamID StreamID, + ) (*streampb.ReplicationStreamSpec, error) } // GetReplicationStreamManager returns a ReplicationStreamManager if a CCL binary is loaded. diff --git a/pkg/testutils/echotest/BUILD.bazel b/pkg/testutils/echotest/BUILD.bazel new file mode 100644 index 000000000000..57b42e94fa3a --- /dev/null +++ b/pkg/testutils/echotest/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "echotest", + srcs = ["echotest.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/testutils/echotest", + visibility = ["//visibility:public"], + deps = ["@com_github_cockroachdb_datadriven//:datadriven"], +) diff --git a/pkg/testutils/echotest/echotest.go b/pkg/testutils/echotest/echotest.go new file mode 100644 index 000000000000..477ae1945eb5 --- /dev/null +++ b/pkg/testutils/echotest/echotest.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package echotest + +import ( + "testing" + + "github.com/cockroachdb/datadriven" +) + +// Require checks that the string matches what is found in the file located at +// the provided path. The file must follow the datadriven format: +// +// echo +// ---- +// +// +// The contents of the file can be updated automatically using datadriven's +// -rewrite flag. +func Require(t *testing.T, act, path string) { + var ran bool + datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { + if d.Cmd != "echo" { + return "only 'echo' is supported" + } + ran = true + return act + }) + if !ran { + // Guard against a possible error in which the file is created, then datadriven + // is invoked with -rewrite to seed it (which it does not do, since there is + // no directive in the file), and then also the tests pass despite not checking + // anything. 
+ t.Errorf("no tests run for %s, is the file empty?", path) + } +} diff --git a/pkg/testutils/keysutils/BUILD.bazel b/pkg/testutils/keysutils/BUILD.bazel index ff589da7533e..089994c3c5b3 100644 --- a/pkg/testutils/keysutils/BUILD.bazel +++ b/pkg/testutils/keysutils/BUILD.bazel @@ -8,7 +8,7 @@ go_library( deps = [ "//pkg/keys", "//pkg/roachpb:with-mocks", - "//pkg/sql/rowenc", + "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/util/encoding", "//pkg/util/keysutil", diff --git a/pkg/testutils/keysutils/pretty_scanner.go b/pkg/testutils/keysutils/pretty_scanner.go index 32b2811351aa..d220bae59966 100644 --- a/pkg/testutils/keysutils/pretty_scanner.go +++ b/pkg/testutils/keysutils/pretty_scanner.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql/rowenc" + "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/keysutil" @@ -161,7 +161,7 @@ func parseAscendingIntIndexKey(input string) (string, roachpb.Key) { return origInput, nil } remainder := input[slashPos:] // `/something/else` -> `/else` - key, err := rowenc.EncodeTableKey(nil, datum, encoding.Ascending) + key, err := keyside.Encode(nil, datum, encoding.Ascending) if err != nil { panic(err) } diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index dbf2fbab3fca..4ec69e9cced0 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -1049,7 +1049,7 @@ func TestLint(t *testing.T) { ":!rpc/codec_test.go", ":!settings/settings_test.go", ":!sql/types/types_jsonpb.go", - ":!sql/schemachanger/scgraphviz/graphviz.go", + ":!sql/schemachanger/scplan/internal/scgraphviz/graphviz.go", ) if err != nil { t.Fatal(err) @@ -1167,6 +1167,7 @@ func TestLint(t *testing.T) { ":!*.pb.go", ":!*.pb.gw.go", ":!kv/kvclient/kvcoord/lock_spans_over_budget_error.go", + ":!roachpb/replica_unavailable_error.go", ":!sql/pgwire/pgerror/constraint_name.go", ":!sql/pgwire/pgerror/severity.go", ":!sql/pgwire/pgerror/with_candidate_code.go", @@ -1509,6 +1510,7 @@ func TestLint(t *testing.T) { case strings.HasSuffix(s, "protoutil"): case strings.HasSuffix(s, "testutils"): case strings.HasSuffix(s, "syncutil"): + case strings.HasSuffix(s, "buildutil"): case strings.HasSuffix(s, settingsPkgPrefix): default: t.Errorf("%s <- please don't add CRDB dependencies to settings pkg", s) diff --git a/pkg/testutils/skip/BUILD.bazel b/pkg/testutils/skip/BUILD.bazel index fe70040bdd17..98be6766db40 100644 --- a/pkg/testutils/skip/BUILD.bazel +++ b/pkg/testutils/skip/BUILD.bazel @@ -11,6 +11,7 @@ go_library( deps = [ "//pkg/build/bazel", "//pkg/util", + "//pkg/util/buildutil", "//pkg/util/envutil", "//pkg/util/syncutil", ], diff --git a/pkg/testutils/skip/skip.go b/pkg/testutils/skip/skip.go index 34fe926c8976..6be83ff2bb5f 100644 --- a/pkg/testutils/skip/skip.go +++ b/pkg/testutils/skip/skip.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/build/bazel" "github.com/cockroachdb/cockroach/pkg/util" + "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/syncutil" ) @@ -136,6 +137,14 @@ func UnderMetamorphic(t SkippableTest, args ...interface{}) { } } +// UnderNonTestBuild skips this test if the build does not have the crdb_test +// tag. 
+func UnderNonTestBuild(t SkippableTest) { + if !buildutil.CrdbTestBuild { + t.Skip("crdb_test tag required for this test") + } +} + // UnderBench returns true iff a test is currently running under `go // test -bench`. When true, tests should avoid writing data on // stdout/stderr from goroutines that run asynchronously with the diff --git a/pkg/ui/workspaces/cluster-ui/src/sessions/sessionDetails.tsx b/pkg/ui/workspaces/cluster-ui/src/sessions/sessionDetails.tsx index 7c43ac9bc05f..363599cca7b5 100644 --- a/pkg/ui/workspaces/cluster-ui/src/sessions/sessionDetails.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/sessions/sessionDetails.tsx @@ -323,7 +323,7 @@ export class SessionDetails extends React.Component { /> ({ history, location: { - pathname: - "/statement/true/SELECT city%2C id FROM vehicles WHERE city %3D %241", + pathname: "/statement/true/4705782015019656142", search: "", hash: "", state: null, }, match: { - path: "/statement/:database/:implicitTxn/:statement", - url: - "/statement/defaultdb/true/SELECT city%2C id FROM vehicles WHERE city %3D %241", + path: "/statement/:implicitTxn/:statement", + url: "/statement/true/4705782015019656142", isExact: true, params: { implicitTxn: "true", - statement: "SELECT city%2C id FROM vehicles WHERE city %3D %241", - database: "defaultdb", + statement: "4705782015019656142", }, }, timeScale: { @@ -152,13 +150,23 @@ export const getStatementDetailsPropsFixture = (): StatementDetailsProps => ({ key: "Custom", }, statement: { - statement: "SELECT city, id FROM vehicles WHERE city = $1", + statement: + "CREATE TABLE IF NOT EXISTS promo_codes (\n" + + " code VARCHAR NOT NULL,\n" + + " description VARCHAR NULL,\n" + + " creation_time TIMESTAMP NULL,\n" + + " expiration_time TIMESTAMP NULL,\n" + + " rules JSONB NULL,\n" + + " PRIMARY KEY (code ASC)\n" + + ")", stats: statementStats, database: "defaultdb", byNode: [ { + aggregatedFingerprintID: fingerprintID, label: "4", - summary: "SELECT city, id FROM vehicles", + summary: + "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", aggregatedTs, aggregationInterval, implicitTxn: true, @@ -167,8 +175,10 @@ export const getStatementDetailsPropsFixture = (): StatementDetailsProps => ({ stats: statementStats, }, { + aggregatedFingerprintID: fingerprintID, label: "3", - summary: "SELECT city, id FROM vehicles", + summary: + "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", aggregatedTs, aggregationInterval, implicitTxn: true, @@ -177,8 +187,10 @@ export const getStatementDetailsPropsFixture = (): StatementDetailsProps => ({ stats: statementStats, }, { + aggregatedFingerprintID: fingerprintID, label: "2", - summary: "SELECT city, id FROM vehicles", + summary: + "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", aggregatedTs, aggregationInterval, implicitTxn: true, @@ -187,8 +199,10 @@ export const getStatementDetailsPropsFixture = (): StatementDetailsProps => ({ stats: statementStats, }, { + aggregatedFingerprintID: fingerprintID, label: "1", - summary: "SELECT city, id FROM vehicles", + summary: + "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, 
creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", aggregatedTs, aggregationInterval, implicitTxn: true, diff --git a/pkg/ui/workspaces/cluster-ui/src/statementDetails/statementDetails.selectors.ts b/pkg/ui/workspaces/cluster-ui/src/statementDetails/statementDetails.selectors.ts index d42804062369..aa2757cc1080 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementDetails/statementDetails.selectors.ts +++ b/pkg/ui/workspaces/cluster-ui/src/statementDetails/statementDetails.selectors.ts @@ -9,7 +9,7 @@ // licenses/APL.txt. import { createSelector } from "@reduxjs/toolkit"; -import { RouteComponentProps, match as Match } from "react-router-dom"; +import { RouteComponentProps } from "react-router-dom"; import { Location } from "history"; import _ from "lodash"; import { AppState } from "../store"; @@ -20,19 +20,19 @@ import { FixLong, flattenStatementStats, getMatchParamByName, - implicitTxnAttr, statementAttr, databaseAttr, - StatementStatistics, - statementKey, aggregatedTsAttr, aggregationIntervalAttr, + StatementStatistics, + statementKey, queryByName, } from "../util"; import { AggregateStatistics } from "../statementsTable"; import { Fraction } from "./statementDetails"; interface StatementDetailsData { + statementFingerprintID: string; nodeId: number; summary: string; aggregatedTs: number; @@ -52,6 +52,7 @@ function coalesceNodeStats( const key = statementKey(stmt); if (!(key in statsKey)) { statsKey[key] = { + statementFingerprintID: stmt.statement_fingerprint_id?.toString(), nodeId: stmt.node_id, summary: stmt.statement_summary, aggregatedTs: stmt.aggregated_ts, @@ -68,6 +69,7 @@ function coalesceNodeStats( return Object.keys(statsKey).map(key => { const stmt = statsKey[key]; return { + aggregatedFingerprintID: stmt.statementFingerprintID, label: stmt.nodeId.toString(), summary: stmt.summary, aggregatedTs: stmt.aggregatedTs, @@ -98,31 +100,24 @@ function fractionMatching( return { numerator, denominator }; } -function filterByRouterParamsPredicate( - match: Match, +function filterByExecStatKey( location: Location, internalAppNamePrefix: string, + statementFingerprintID: string, ): (stat: ExecutionStatistics) => boolean { - const statement = getMatchParamByName(match, statementAttr); - const implicitTxn = getMatchParamByName(match, implicitTxnAttr) === "true"; - const database = - queryByName(location, databaseAttr) === "(unset)" - ? "" - : queryByName(location, databaseAttr); const apps = queryByName(location, appAttr) ? queryByName(location, appAttr).split(",") : null; + // If the aggregatedTs is unset, we will aggregate across the current date range. 
const aggregatedTs = queryByName(location, aggregatedTsAttr); const aggInterval = queryByName(location, aggregationIntervalAttr); const filterByKeys = (stmt: ExecutionStatistics) => - stmt.statement === statement && + stmt.statement_fingerprint_id?.toString() === statementFingerprintID && (aggregatedTs == null || stmt.aggregated_ts.toString() === aggregatedTs) && (aggInterval == null || - stmt.aggregation_interval.toString() === aggInterval) && - stmt.implicit_txn === implicitTxn && - (stmt.database === database || database === null); + stmt.aggregation_interval.toString() === aggInterval); if (!apps) { return filterByKeys; @@ -152,15 +147,25 @@ export const selectStatement = createSelector( const internalAppNamePrefix = sqlStatsState.data?.internal_app_name_prefix; const flattened = flattenStatementStats(statements); - const results = _.filter( - flattened, - filterByRouterParamsPredicate( - props.match, + const statementFingerprintID = getMatchParamByName( + props.match, + statementAttr, + ); + const results = flattened.filter( + filterByExecStatKey( props.location, internalAppNamePrefix, + statementFingerprintID, ), ); - const statement = getMatchParamByName(props.match, statementAttr); + + // We expect a single result to be returned. The key used to retrieve results is specific per: + // - statement fingerprint id + // - aggregation timestamp + // - aggregation period + // see the `statementKey` function in appStats.ts for implementation. + const statement = results[0].statement; + return { statement, stats: combineStatementStats(results.map(s => s.stats)), diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.fixture.ts b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.fixture.ts index f0a7d22e06ef..9d87bd91dbcf 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.fixture.ts +++ b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.fixture.ts @@ -282,10 +282,17 @@ const statementsPagePropsFixture: StatementsPageProps = { regions: "", nodes: "", }, + // Aggregate key values in these statements will need to change if implementation + // of 'statementKey' in appStats.ts changes. 
statements: [ { + aggregatedFingerprintID: "1253500548539870016", label: - "SELECT IFNULL(a, b) FROM (SELECT (SELECT code FROM promo_codes WHERE code > $1 ORDER BY code LIMIT _) AS a, (SELECT code FROM promo_codes ORDER BY code LIMIT _) AS b)", + "SELECT IFNULL(a, b)\n" + + " FROM (\n" + + " SELECT (SELECT code FROM promo_codes WHERE code > $1 ORDER BY code LIMIT _) AS a,\n" + + " (SELECT code FROM promo_codes ORDER BY code LIMIT _) AS b\n" + + " )", summary: "SELECT IFNULL(a, b) FROM (SELECT)", aggregatedTs, aggregationInterval, @@ -295,6 +302,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "1985666523427702831", label: "INSERT INTO vehicles VALUES ($1, $2, __more6__)", summary: "INSERT INTO vehicles", aggregatedTs, @@ -305,8 +313,13 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "13649565517143827225", label: - "SELECT IFNULL(a, b) FROM (SELECT (SELECT id FROM users WHERE (city = $1) AND (id > $2) ORDER BY id LIMIT _) AS a, (SELECT id FROM users WHERE city = $1 ORDER BY id LIMIT _) AS b)", + "SELECT IFNULL(a, b)\n" + + " FROM (\n" + + " SELECT (SELECT id FROM users WHERE city = $1 AND id > $2 ORDER BY id LIMIT _) AS a,\n" + + " (SELECT id FROM users WHERE city = $1 ORDER BY id LIMIT _) AS b\n" + + " )", summary: "SELECT IFNULL(a, b) FROM (SELECT)", aggregatedTs, aggregationInterval, @@ -316,6 +329,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "1533636712988872414", label: "UPSERT INTO vehicle_location_histories VALUES ($1, $2, now(), $3, $4)", summary: "UPSERT INTO vehicle_location_histories", @@ -327,6 +341,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "2461578209191418170", label: "INSERT INTO user_promo_codes VALUES ($1, $2, $3, now(), _)", summary: "INSERT INTO user_promo_codes", aggregatedTs, @@ -337,6 +352,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "4705782015019656142", label: "SELECT city, id FROM vehicles WHERE city = $1", summary: "SELECT city, id FROM vehicles", aggregatedTs, @@ -347,6 +363,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "2298970482983227199", label: "INSERT INTO rides VALUES ($1, $2, $2, $3, $4, $5, _, now(), _, $6)", summary: "INSERT INTO rides", @@ -358,6 +375,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "4716433305747424413", label: "SELECT IFNULL(a, b) FROM (SELECT AS a, AS b)", summary: "SELECT IFNULL(a, b) FROM (SELECT)", aggregatedTs, @@ -368,6 +386,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "367828504526856403", label: "UPDATE rides SET end_address = $3, end_time = now() WHERE (city = $1) AND (id = $2)", summary: "UPDATE rides SET end_address = $... 
WHERE (city = $1) AND...", @@ -379,6 +398,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "14972494059652918390", label: "INSERT INTO users VALUES ($1, $2, __more3__)", summary: "INSERT INTO users", aggregatedTs, @@ -389,6 +409,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "15897033026745880862", label: "SELECT count(*) FROM user_promo_codes WHERE ((city = $1) AND (user_id = $2)) AND (code = $3)", summary: "SELECT count(*) FROM user_promo_codes", @@ -400,6 +421,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '49958554803360403681', label: "INSERT INTO promo_codes VALUES ($1, $2, __more3__)", summary: "INSERT INTO promo_codes", aggregatedTs, @@ -410,6 +432,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '9233296116064220812', label: "ALTER TABLE users SCATTER FROM (_, _) TO (_, _)", summary: "ALTER TABLE users SCATTER FROM (_, _) TO (_, _)", aggregatedTs, @@ -420,6 +443,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '6117473345491440803', label: "ALTER TABLE rides ADD FOREIGN KEY (vehicle_city, vehicle_id) REFERENCES vehicles (city, id)", summary: @@ -432,6 +456,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '1301242584620444873', label: "SHOW database", summary: "SHOW database", aggregatedTs, @@ -443,8 +468,16 @@ const statementsPagePropsFixture: StatementsPageProps = { diagnosticsReports, }, { + aggregatedFingerprintID: '11195381626529102926', label: - "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", + "CREATE TABLE IF NOT EXISTS promo_codes (\n" + + " code VARCHAR NOT NULL,\n" + + " description VARCHAR NULL,\n" + + " creation_time TIMESTAMP NULL,\n" + + " expiration_time TIMESTAMP NULL,\n" + + " rules JSONB NULL,\n" + + " PRIMARY KEY (code ASC)\n" + + " )", summary: "CREATE TABLE IF NOT EXISTS promo_codes (code VARCHAR NOT NULL, description VARCHAR NULL, creation_time TIMESTAMP NULL, expiration_time TIMESTAMP NULL, rules JSONB NULL, PRIMARY KEY (code ASC))", aggregatedTs, @@ -455,6 +488,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '18127289707013477303', label: "ALTER TABLE users SPLIT AT VALUES (_, _)", summary: "ALTER TABLE users SPLIT AT VALUES (_, _)", aggregatedTs, @@ -465,6 +499,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '2499764450427976233', label: "ALTER TABLE vehicles SCATTER FROM (_, _) TO (_, _)", summary: "ALTER TABLE vehicles SCATTER FROM (_, _) TO (_, _)", aggregatedTs, @@ -475,6 +510,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '818321793552651414', label: "ALTER TABLE vehicle_location_histories ADD FOREIGN KEY (city, ride_id) REFERENCES rides (city, id)", summary: @@ -487,8 +523,16 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '13217779306501326587', label: - 'CREATE TABLE IF NOT EXISTS user_promo_codes (city VARCHAR NOT 
NULL, user_id UUID NOT NULL, code VARCHAR NOT NULL, "timestamp" TIMESTAMP NULL, usage_count INT8 NULL, PRIMARY KEY (city ASC, user_id ASC, code ASC))', + 'CREATE TABLE IF NOT EXISTS user_promo_codes (\n' + + ' city VARCHAR NOT NULL,\n' + + ' user_id UUID NOT NULL,\n' + + ' code VARCHAR NOT NULL,\n' + + ' "timestamp" TIMESTAMP NULL,\n' + + ' usage_count INT8 NULL,\n' + + ' PRIMARY KEY (city ASC, user_id ASC, code ASC)\n' + + ' )', summary: 'CREATE TABLE IF NOT EXISTS user_promo_codes (city VARCHAR NOT NULL, user_id UUID NOT NULL, code VARCHAR NOT NULL, "timestamp" TIMESTAMP NULL, usage_count INT8 NULL, PRIMARY KEY (city ASC, user_id ASC, code ASC))', aggregatedTs, @@ -499,6 +543,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '6325213731862855938', label: "INSERT INTO users VALUES ($1, $2, __more3__), (__more40__)", summary: "INSERT INTO users VALUES", aggregatedTs, @@ -509,6 +554,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '17372586739449521577', label: "ALTER TABLE rides SCATTER FROM (_, _) TO (_, _)", summary: "ALTER TABLE rides SCATTER FROM (_, _) TO (_, _)", aggregatedTs, @@ -519,6 +565,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '17098541896015126122', label: 'SET CLUSTER SETTING "cluster.organization" = $1', summary: 'SET CLUSTER SETTING "cluster.organization" = $1', aggregatedTs, @@ -529,6 +576,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '13350023170184726428', label: "ALTER TABLE vehicles ADD FOREIGN KEY (city, owner_id) REFERENCES users (city, id)", summary: @@ -541,8 +589,24 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '2695725667586429780', label: - "CREATE TABLE IF NOT EXISTS rides (id UUID NOT NULL, city VARCHAR NOT NULL, vehicle_city VARCHAR NULL, rider_id UUID NULL, vehicle_id UUID NULL, start_address VARCHAR NULL, end_address VARCHAR NULL, start_time TIMESTAMP NULL, end_time TIMESTAMP NULL, revenue DECIMAL(10,2) NULL, PRIMARY KEY (city ASC, id ASC), INDEX rides_auto_index_fk_city_ref_users (city ASC, rider_id ASC), INDEX rides_auto_index_fk_vehicle_city_ref_vehicles (vehicle_city ASC, vehicle_id ASC), CONSTRAINT check_vehicle_city_city CHECK (vehicle_city = city))", + "CREATE TABLE IF NOT EXISTS rides (\n" + + " id UUID NOT NULL,\n" + + " city VARCHAR NOT NULL,\n" + + " vehicle_city VARCHAR NULL,\n" + + " rider_id UUID NULL,\n" + + " vehicle_id UUID NULL,\n" + + " start_address VARCHAR NULL,\n" + + " end_address VARCHAR NULL,\n" + + " start_time TIMESTAMP NULL,\n" + + " end_time TIMESTAMP NULL,\n" + + " revenue DECIMAL(10,2) NULL,\n" + + " PRIMARY KEY (city ASC, id ASC),\n" + + " INDEX rides_auto_index_fk_city_ref_users (city ASC, rider_id ASC),\n" + + " INDEX rides_auto_index_fk_vehicle_city_ref_vehicles (vehicle_city ASC, vehicle_id ASC),\n" + + " CONSTRAINT check_vehicle_city_city CHECK (vehicle_city = city)\n" + + " )", summary: "CREATE TABLE IF NOT EXISTS rides (id UUID NOT NULL, city VARCHAR NOT NULL, vehicle_city VARCHAR NULL, rider_id UUID NULL, vehicle_id UUID NULL, start_address VARCHAR NULL, end_address VARCHAR NULL, start_time TIMESTAMP NULL, end_time TIMESTAMP NULL, revenue DECIMAL(10,2) NULL, PRIMARY KEY (city ASC, id ASC), INDEX rides_auto_index_fk_city_ref_users (city ASC, rider_id ASC), INDEX 
rides_auto_index_fk_vehicle_city_ref_vehicles (vehicle_city ASC, vehicle_id ASC), CONSTRAINT check_vehicle_city_city CHECK (vehicle_city = city))", aggregatedTs, @@ -553,8 +617,20 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '6754865160812330169', label: - "CREATE TABLE IF NOT EXISTS vehicles (id UUID NOT NULL, city VARCHAR NOT NULL, type VARCHAR NULL, owner_id UUID NULL, creation_time TIMESTAMP NULL, status VARCHAR NULL, current_location VARCHAR NULL, ext JSONB NULL, PRIMARY KEY (city ASC, id ASC), INDEX vehicles_auto_index_fk_city_ref_users (city ASC, owner_id ASC))", + "CREATE TABLE IF NOT EXISTS vehicles (\n" + + " id UUID NOT NULL,\n" + + " city VARCHAR NOT NULL,\n" + + " type VARCHAR NULL,\n" + + " owner_id UUID NULL,\n" + + " creation_time TIMESTAMP NULL,\n" + + " status VARCHAR NULL,\n" + + " current_location VARCHAR NULL,\n" + + " ext JSONB NULL,\n" + + " PRIMARY KEY (city ASC, id ASC),\n" + + " INDEX vehicles_auto_index_fk_city_ref_users (city ASC, owner_id ASC)\n" + + " )", summary: "CREATE TABLE IF NOT EXISTS vehicles (id UUID NOT NULL, city VARCHAR NOT NULL, type VARCHAR NULL, owner_id UUID NULL, creation_time TIMESTAMP NULL, status VARCHAR NULL, current_location VARCHAR NULL, ext JSONB NULL, PRIMARY KEY (city ASC, id ASC), INDEX vehicles_auto_index_fk_city_ref_users (city ASC, owner_id ASC))", aggregatedTs, @@ -565,6 +641,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '6810471486115018510', label: "INSERT INTO rides VALUES ($1, $2, __more8__), (__more400__)", summary: "INSERT INTO rides", aggregatedTs, @@ -575,6 +652,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '13265908854908549668', label: "ALTER TABLE vehicles SPLIT AT VALUES (_, _)", summary: "ALTER TABLE vehicles SPLIT AT VALUES (_, _)", aggregatedTs, @@ -585,6 +663,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '18377382163116490400', label: "SET sql_safe_updates = _", summary: "SET sql_safe_updates = _", aggregatedTs, @@ -595,8 +674,16 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '8695470234690735168', label: - "CREATE TABLE IF NOT EXISTS users (id UUID NOT NULL, city VARCHAR NOT NULL, name VARCHAR NULL, address VARCHAR NULL, credit_card VARCHAR NULL, PRIMARY KEY (city ASC, id ASC))", + "CREATE TABLE IF NOT EXISTS users (\n" + + " id UUID NOT NULL,\n" + + " city VARCHAR NOT NULL,\n" + + " name VARCHAR NULL,\n" + + " address VARCHAR NULL,\n" + + " credit_card VARCHAR NULL,\n" + + " PRIMARY KEY (city ASC, id ASC)\n" + + " )", summary: "CREATE TABLE IF NOT EXISTS users (id UUID NOT NULL, city VARCHAR NOT NULL, name VARCHAR NULL, address VARCHAR NULL, credit_card VARCHAR NULL, PRIMARY KEY (city ASC, id ASC))", aggregatedTs, @@ -607,8 +694,16 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '9261848985398568228', label: - 'CREATE TABLE IF NOT EXISTS vehicle_location_histories (city VARCHAR NOT NULL, ride_id UUID NOT NULL, "timestamp" TIMESTAMP NOT NULL, lat FLOAT8 NULL, long FLOAT8 NULL, PRIMARY KEY (city ASC, ride_id ASC, "timestamp" ASC))', + 'CREATE TABLE IF NOT EXISTS vehicle_location_histories (\n' + + ' city VARCHAR NOT NULL,\n' + + ' ride_id UUID NOT NULL,\n' + + ' "timestamp" TIMESTAMP NOT NULL,\n' + 
+ ' lat FLOAT8 NULL,\n' + + ' long FLOAT8 NULL,\n' + + ' PRIMARY KEY (city ASC, ride_id ASC, "timestamp" ASC)\n' + + ' )', summary: 'CREATE TABLE IF NOT EXISTS vehicle_location_histories (city VARCHAR NOT NULL, ride_id UUID NOT NULL, "timestamp" TIMESTAMP NOT NULL, lat FLOAT8 NULL, long FLOAT8 NULL, PRIMARY KEY (city ASC, ride_id ASC, "timestamp" ASC))', aggregatedTs, @@ -619,6 +714,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: '4176684928840388768', label: "SELECT * FROM crdb_internal.node_build_info", summary: "SELECT * FROM crdb_internal.node_build_info", aggregatedTs, @@ -629,6 +725,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "15868120298061590648", label: "CREATE DATABASE movr", summary: "CREATE DATABASE movr", implicitTxn: true, @@ -640,6 +737,7 @@ const statementsPagePropsFixture: StatementsPageProps = { diagnosticsReports: diagnosticsReportsInProgress, }, { + aggregatedFingerprintID: "13070583869906258880", label: "SELECT count(*) > _ FROM [SHOW ALL CLUSTER SETTINGS] AS _ (v) WHERE v = _", summary: "SELECT count(*) > _ FROM [SHOW ALL CLUSTER SETTINGS] AS...", @@ -651,6 +749,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "641287435601027145", label: 'SET CLUSTER SETTING "enterprise.license" = $1', summary: 'SET CLUSTER SETTING "enterprise.license" = $1', aggregatedTs, @@ -661,6 +760,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "16743225271705059729", label: "ALTER TABLE rides ADD FOREIGN KEY (city, rider_id) REFERENCES users (city, id)", summary: @@ -673,6 +773,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "6075815909800602827", label: "ALTER TABLE user_promo_codes ADD FOREIGN KEY (city, user_id) REFERENCES users (city, id)", summary: @@ -685,6 +786,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "5158086166870396309", label: "INSERT INTO promo_codes VALUES ($1, $2, __more3__), (__more900__)", summary: "INSERT INTO promo_codes", @@ -696,6 +798,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "13494397675172244644", label: "ALTER TABLE rides SPLIT AT VALUES (_, _)", summary: "ALTER TABLE rides SPLIT AT VALUES (_, _)", aggregatedTs, @@ -706,6 +809,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "101921598584277094", label: "SELECT value FROM crdb_internal.node_build_info WHERE field = _", summary: "SELECT value FROM crdb_internal.node_build_info", aggregatedTs, @@ -716,6 +820,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "7880339715822034020", label: "INSERT INTO vehicle_location_histories VALUES ($1, $2, __more3__), (__more900__)", summary: "INSERT INTO vehicle_location_histories", @@ -727,6 +832,7 @@ const statementsPagePropsFixture: StatementsPageProps = { stats: statementStats, }, { + aggregatedFingerprintID: "16819876564846676829", label: "INSERT INTO vehicles VALUES ($1, $2, __more6__), (__more10__)", summary: "INSERT INTO vehicles", aggregatedTs, diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts 
b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts index 88bec0631f05..8a2cc5d2072c 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts +++ b/pkg/ui/workspaces/cluster-ui/src/statementsPage/statementsPage.selectors.ts @@ -32,6 +32,7 @@ import { SQLStatsState } from "../store/sqlStats"; type ICollectedStatementStatistics = cockroach.server.serverpb.StatementsResponse.ICollectedStatementStatistics; export interface StatementsSummaryData { + statementFingerprintID: string; statement: string; statementSummary: string; aggregatedTs: number; @@ -173,6 +174,7 @@ export const selectStatements = createSelector( const key = statementKey(stmt); if (!(key in statsByStatementKey)) { statsByStatementKey[key] = { + statementFingerprintID: stmt.statement_fingerprint_id?.toString(), statement: stmt.statement, statementSummary: stmt.statement_summary, aggregatedTs: stmt.aggregated_ts, @@ -189,6 +191,7 @@ export const selectStatements = createSelector( return Object.keys(statsByStatementKey).map(key => { const stmt = statsByStatementKey[key]; return { + aggregatedFingerprintID: stmt.statementFingerprintID, label: stmt.statement, summary: stmt.statementSummary, aggregatedTs: stmt.aggregatedTs, diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTable.tsx b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTable.tsx index 01e6a7d8c67b..e1747c258076 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTable.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTable.tsx @@ -10,7 +10,6 @@ import React from "react"; import classNames from "classnames/bind"; -import Long from "long"; import { FixLong, @@ -204,6 +203,7 @@ function makeCommonColumns( } export interface AggregateStatistics { + aggregatedFingerprintID: string; // label is either shortStatement (StatementsPage) or nodeId (StatementDetails). 
label: string; // summary exists only for SELECT/INSERT/UPSERT/UPDATE statements, and is diff --git a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.tsx b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.tsx index 46421df5bc3e..cb8906deda56 100644 --- a/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.tsx +++ b/pkg/ui/workspaces/cluster-ui/src/statementsTable/statementsTableContent.tsx @@ -47,6 +47,7 @@ export const StatementTableCell = { onStatementClick?: (statement: string) => void, ) => (stmt: AggregateStatistics): React.ReactElement => ( { const base = `/statement/${props.implicitTxn}`; - const linkStatement = props.statementNoConstants || props.statement; + const statementFingerprintID = props.statementFingerprintID; const searchParams = propsToQueryString({ [databaseAttr]: props.database, @@ -158,10 +159,13 @@ export const StatementLinkTarget = ( [aggregationIntervalAttr]: props.aggregationInterval, }); - return `${base}/${encodeURIComponent(linkStatement)}?${searchParams}`; + return `${base}/${encodeURIComponent( + statementFingerprintID, + )}?${searchParams}`; }; interface StatementLinkProps { + statementFingerprintID: string; aggregatedTs?: number; aggregationInterval?: number; statement: string; @@ -175,6 +179,7 @@ interface StatementLinkProps { } export const StatementLink = ({ + statementFingerprintID, aggregatedTs, aggregationInterval, statement, @@ -193,6 +198,7 @@ export const StatementLink = ({ }, [onClick, statement]); const linkProps = { + statementFingerprintID, aggregatedTs, aggregationInterval, statement, diff --git a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts index 9a116144c592..d4322707afdd 100644 --- a/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts +++ b/pkg/ui/workspaces/cluster-ui/src/transactionsPage/utils.ts @@ -100,6 +100,7 @@ export const aggregateStatements = ( const key = transactionScopedStatementKey(s); if (!(key in statsKey)) { statsKey[key] = { + aggregatedFingerprintID: s.statement_fingerprint_id?.toString(), label: s.statement, summary: s.statement_summary, aggregatedTs: s.aggregated_ts, diff --git a/pkg/ui/workspaces/cluster-ui/src/util/appStats/appStats.ts b/pkg/ui/workspaces/cluster-ui/src/util/appStats/appStats.ts index d1940becd825..250c057f72c0 100644 --- a/pkg/ui/workspaces/cluster-ui/src/util/appStats/appStats.ts +++ b/pkg/ui/workspaces/cluster-ui/src/util/appStats/appStats.ts @@ -202,6 +202,7 @@ export function aggregateStatementStats( } export interface ExecutionStatistics { + statement_fingerprint_id: Long; statement: string; statement_summary: string; aggregated_ts: number; @@ -222,6 +223,7 @@ export function flattenStatementStats( statementStats: CollectedStatementStatistics[], ): ExecutionStatistics[] { return statementStats.map(stmt => ({ + statement_fingerprint_id: stmt.id, statement: stmt.key.key_data.query, statement_summary: stmt.key.key_data.query_summary, aggregated_ts: TimestampToNumber(stmt.key.aggregated_ts), @@ -257,9 +259,7 @@ export const getSearchParams = (searchParams: string) => { // aggregated_ts and aggregation_interval. 
export function statementKey(stmt: ExecutionStatistics): string { return ( - stmt.statement + - stmt.implicit_txn + - stmt.database + + stmt.statement_fingerprint_id?.toString() + stmt.aggregated_ts + stmt.aggregation_interval ); diff --git a/pkg/ui/workspaces/db-console/src/routes/RedirectToStatementDetails.tsx b/pkg/ui/workspaces/db-console/src/routes/RedirectToStatementDetails.tsx index a58057ce8e56..5e8e297bdf49 100644 --- a/pkg/ui/workspaces/db-console/src/routes/RedirectToStatementDetails.tsx +++ b/pkg/ui/workspaces/db-console/src/routes/RedirectToStatementDetails.tsx @@ -27,7 +27,7 @@ type Props = { // where app and database are route params, to the new StatementDetails route. export function RedirectToStatementDetails({ match }: Props) { const linkProps = { - statement: getMatchParamByName(match, statementAttr), + statementFingerprintID: getMatchParamByName(match, statementAttr), app: getMatchParamByName(match, appAttr), implicitTxn: getMatchParamByName(match, implicitTxnAttr) === "true", database: getMatchParamByName(match, databaseAttr), diff --git a/pkg/ui/workspaces/db-console/src/views/statements/statementDetails.tsx b/pkg/ui/workspaces/db-console/src/views/statements/statementDetails.tsx index e504c6598c37..9fa4b90becb5 100644 --- a/pkg/ui/workspaces/db-console/src/views/statements/statementDetails.tsx +++ b/pkg/ui/workspaces/db-console/src/views/statements/statementDetails.tsx @@ -8,11 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. import { connect } from "react-redux"; -import { - RouteComponentProps, - match as Match, - withRouter, -} from "react-router-dom"; +import { RouteComponentProps, withRouter } from "react-router-dom"; import { Location } from "history"; import { createSelector } from "reselect"; import _ from "lodash"; @@ -33,7 +29,6 @@ import { aggregationIntervalAttr, appAttr, databaseAttr, - implicitTxnAttr, statementAttr, } from "src/util/constants"; import { FixLong } from "src/util/fixLong"; @@ -65,6 +60,7 @@ interface Fraction { } interface StatementDetailsData { + statementFingerprintID: string; nodeId: number; summary: string; aggregatedTs: number; @@ -78,12 +74,13 @@ interface StatementDetailsData { function coalesceNodeStats( stats: ExecutionStatistics[], ): AggregateStatistics[] { - const statsKey: { [nodeId: string]: StatementDetailsData } = {}; + const statsKey: { [stmtKey: string]: StatementDetailsData } = {}; stats.forEach(stmt => { const key = statementKey(stmt); if (!(key in statsKey)) { statsKey[key] = { + statementFingerprintID: stmt.statement_fingerprint_id?.toString(), nodeId: stmt.node_id, summary: stmt.statement_summary, aggregatedTs: stmt.aggregated_ts, @@ -100,6 +97,7 @@ function coalesceNodeStats( return Object.keys(statsKey).map(key => { const stmt = statsKey[key]; return { + aggregatedFingerprintID: stmt.statementFingerprintID, label: stmt.nodeId.toString(), summary: stmt.summary, aggregatedTs: stmt.aggregatedTs, @@ -130,31 +128,24 @@ function fractionMatching( return { numerator, denominator }; } -function filterByRouterParamsPredicate( - match: Match, +function filterByExecStatKey( location: Location, internalAppNamePrefix: string, + statementFingerprintID: string, ): (stat: ExecutionStatistics) => boolean { - const statement = getMatchParamByName(match, statementAttr); - const implicitTxn = getMatchParamByName(match, implicitTxnAttr) === "true"; - const database = - queryByName(location, databaseAttr) === "(unset)" - ? 
"" - : queryByName(location, databaseAttr); const apps = queryByName(location, appAttr) ? queryByName(location, appAttr).split(",") : null; + // If the aggregatedTs is unset, we will aggregate across the current date range. const aggregatedTs = queryByName(location, aggregatedTsAttr); const aggInterval = queryByName(location, aggregationIntervalAttr); const filterByKeys = (stmt: ExecutionStatistics) => - stmt.statement === statement && + stmt.statement_fingerprint_id?.toString() === statementFingerprintID && (aggregatedTs == null || stmt.aggregated_ts.toString() === aggregatedTs) && (aggInterval == null || - stmt.aggregation_interval.toString() === aggInterval) && - stmt.implicit_txn === implicitTxn && - (stmt.database === database || database === null); + stmt.aggregation_interval.toString() === aggInterval); if (!apps) { return filterByKeys; @@ -185,14 +176,26 @@ export const selectStatement = createSelector( const internalAppNamePrefix = statementsState.data?.internal_app_name_prefix; const flattened = flattenStatementStats(statements); + + const statementFingerprintID = getMatchParamByName( + props.match, + statementAttr, + ); const results = flattened.filter( - filterByRouterParamsPredicate( - props.match, + filterByExecStatKey( props.location, internalAppNamePrefix, + statementFingerprintID, ), ); - const statement = getMatchParamByName(props.match, statementAttr); + + // We expect a single result to be returned. The key used to retrieve results is specific per: + // - statement fingerprint id + // - aggregation timestamp + // - aggregation period + // see the `statementKey` function in appStats.ts for implementation. + const statement = results[0].statement; + return { statement, stats: combineStatementStats(results.map(s => s.stats)), diff --git a/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx b/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx index 46844ebf657e..3ce9e1226020 100644 --- a/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx +++ b/pkg/ui/workspaces/db-console/src/views/statements/statements.spec.tsx @@ -17,7 +17,6 @@ import { merge } from "lodash"; import "src/protobufInit"; import * as protos from "src/js/protos"; -import { util } from "@cockroachlabs/cluster-ui"; import { appAttr, statementAttr } from "src/util/constants"; import { selectStatements, @@ -28,6 +27,7 @@ import { import { selectStatement } from "./statementDetails"; import ISensitiveInfo = protos.cockroach.sql.ISensitiveInfo; import { AdminUIState, createAdminUIStore } from "src/redux/state"; +import { util } from "@cockroachlabs/cluster-ui"; type CollectedStatementStatistics = util.CollectedStatementStatistics; type ExecStats = util.ExecStats; @@ -53,7 +53,6 @@ describe("selectStatements", () => { const props = makeEmptyRouteProps(); const result = selectStatements(state, props); - assert.equal(result.length, 3); const expectedFingerprints = [stmtA, stmtB, stmtC].map( @@ -276,8 +275,9 @@ describe("selectStatement", () => { const stmtB = makeFingerprint(2, "foobar"); const stmtC = makeFingerprint(3, "another"); const state = makeStateWithStatements([stmtA, stmtB, stmtC]); - const props = makeRoutePropsWithStatement(stmtA.key.key_data.query); + const stmtAFingerprintID = stmtA.id.toString(); + const props = makeRoutePropsWithStatement(stmtAFingerprintID); const result = selectStatement(state, props); assert.equal(result.statement, stmtA.key.key_data.query); @@ -297,8 +297,8 @@ describe("selectStatement", () => { 
.add(stmtB.stats.count.add(stmtC.stats.count)) .toNumber(); const state = makeStateWithStatements([stmtA, stmtB, stmtC]); - const props = makeRoutePropsWithStatement(stmtA.key.key_data.query); - + const stmtAFingerprintID = stmtA.id.toString(); + const props = makeRoutePropsWithStatement(stmtAFingerprintID); const result = selectStatement(state, props); assert.equal(result.statement, stmtA.key.key_data.query); @@ -323,8 +323,8 @@ describe("selectStatement", () => { .add(stmtC.stats.count) .toNumber(); const state = makeStateWithStatements([stmtA, stmtB, stmtC]); - const props = makeRoutePropsWithStatement(stmtA.key.key_data.query); - + const stmtAFingerprintID = stmtA.id.toString(); + const props = makeRoutePropsWithStatement(stmtAFingerprintID); const result = selectStatement(state, props); assert.equal(result.statement, stmtA.key.key_data.query); @@ -364,8 +364,8 @@ describe("selectStatement", () => { stmtG, stmtH, ]); - const props = makeRoutePropsWithStatement(stmtA.key.key_data.query); - + const stmtAFingerprintID = stmtA.id.toString(); + const props = makeRoutePropsWithStatement(stmtAFingerprintID); const result = selectStatement(state, props); assert.equal(result.statement, stmtA.key.key_data.query); @@ -384,10 +384,8 @@ describe("selectStatement", () => { makeFingerprint(2, "bar"), makeFingerprint(3, "baz"), ]); - const props = makeRoutePropsWithStatementAndApp( - stmtA.key.key_data.query, - "foo", - ); + const stmtAFingerprintID = stmtA.id.toString(); + const props = makeRoutePropsWithStatementAndApp(stmtAFingerprintID, "foo"); const result = selectStatement(state, props); @@ -407,8 +405,9 @@ describe("selectStatement", () => { makeFingerprint(2, "bar"), makeFingerprint(3, "baz"), ]); + const stmtAFingerprintID = stmtA.id.toString(); const props = makeRoutePropsWithStatementAndApp( - stmtA.key.key_data.query, + stmtAFingerprintID, "(unset)", ); @@ -430,8 +429,9 @@ describe("selectStatement", () => { makeFingerprint(2, "bar"), makeFingerprint(3, "baz"), ]); + const stmtAFingerprintID = stmtA.id.toString(); const props = makeRoutePropsWithStatementAndApp( - stmtA.key.key_data.query, + stmtAFingerprintID, "$ internal", ); @@ -467,6 +467,7 @@ function makeFingerprint( }, node_id: nodeId, }, + id: Long.fromNumber(id), stats: makeStats(), }; } diff --git a/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx b/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx index 2a3c5bee4fb4..ac5f618b9750 100644 --- a/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx +++ b/pkg/ui/workspaces/db-console/src/views/statements/statementsPage.tsx @@ -60,6 +60,7 @@ type ExecutionStatistics = util.ExecutionStatistics; type StatementStatistics = util.StatementStatistics; interface StatementsSummaryData { + statementFingerprintID: string; statement: string; statementSummary: string; aggregatedTs: number; @@ -118,6 +119,7 @@ export const selectStatements = createSelector( const key = statementKey(stmt); if (!(key in statsByStatementKey)) { statsByStatementKey[key] = { + statementFingerprintID: stmt.statement_fingerprint_id?.toString(), statement: stmt.statement, statementSummary: stmt.statement_summary, aggregatedTs: stmt.aggregated_ts, @@ -134,6 +136,7 @@ export const selectStatements = createSelector( return Object.keys(statsByStatementKey).map(key => { const stmt = statsByStatementKey[key]; return { + aggregatedFingerprintID: stmt.statementFingerprintID, label: stmt.statement, summary: stmt.statementSummary, aggregatedTs: stmt.aggregatedTs, diff --git 
a/pkg/util/duration/BUILD.bazel b/pkg/util/duration/BUILD.bazel index 4ece483c190b..d94df65fc164 100644 --- a/pkg/util/duration/BUILD.bazel +++ b/pkg/util/duration/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/util/arith", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/util/duration/duration.go b/pkg/util/duration/duration.go index d59d263be2ed..ecdee8c3f585 100644 --- a/pkg/util/duration/duration.go +++ b/pkg/util/duration/duration.go @@ -18,6 +18,7 @@ import ( "strings" "time" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/util/arith" @@ -62,9 +63,9 @@ const ( ) var ( - bigDaysInMonth = big.NewInt(DaysPerMonth) - bigNanosInDay = big.NewInt(nanosInDay) - bigNanosInMonth = big.NewInt(nanosInMonth) + bigDaysInMonth = apd.NewBigInt(DaysPerMonth) + bigNanosInDay = apd.NewBigInt(nanosInDay) + bigNanosInMonth = apd.NewBigInt(nanosInMonth) ) // errEncodeOverflow is returned by Encode when the sortNanos returned would @@ -298,18 +299,18 @@ func FromFloat64(x float64) Duration { return d.normalize().round() } -// FromBigInt converts a big.Int number of nanoseconds to a duration. Inverse +// FromBigInt converts an apd.BigInt number of nanoseconds to a duration. Inverse // conversion of AsBigInt. Boolean false if the result overflows. -func FromBigInt(src *big.Int) (Duration, bool) { - var rem big.Int - var monthsDec big.Int +func FromBigInt(src *apd.BigInt) (Duration, bool) { + var rem apd.BigInt + var monthsDec apd.BigInt monthsDec.QuoRem(src, bigNanosInMonth, &rem) if !monthsDec.IsInt64() { return Duration{}, false } - var daysDec big.Int - var nanosRem big.Int + var daysDec apd.BigInt + var nanosRem apd.BigInt daysDec.QuoRem(&rem, bigNanosInDay, &nanosRem) // Note: we do not need to check for overflow of daysDec because any // excess bits were spilled into months above already. @@ -359,14 +360,14 @@ func (d Duration) AsFloat64() float64 { float64(numMonthsInYear*DaysPerMonth*SecsPerDay) } -// AsBigInt converts a duration to a big.Int with the number of nanoseconds. -func (d Duration) AsBigInt(dst *big.Int) { +// AsBigInt converts a duration to an apd.BigInt with the number of nanoseconds. +func (d Duration) AsBigInt(dst *apd.BigInt) { dst.SetInt64(d.Months) dst.Mul(dst, bigDaysInMonth) - dst.Add(dst, big.NewInt(d.Days)) + dst.Add(dst, apd.NewBigInt(d.Days)) dst.Mul(dst, bigNanosInDay) // Uses rounded instead of nanos here to remove any on-disk nanos. 
- dst.Add(dst, big.NewInt(d.rounded())) + dst.Add(dst, apd.NewBigInt(d.rounded())) } const ( diff --git a/pkg/util/encoding/BUILD.bazel b/pkg/util/encoding/BUILD.bazel index 9ce055044c94..cb215ecaf4c5 100644 --- a/pkg/util/encoding/BUILD.bazel +++ b/pkg/util/encoding/BUILD.bazel @@ -23,7 +23,7 @@ go_library( "//pkg/util/timetz", "//pkg/util/timeutil", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) @@ -53,7 +53,7 @@ go_test( "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", "//pkg/util/uuid", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/util/encoding/decimal.go b/pkg/util/encoding/decimal.go index 7ae74d8639bc..8d89acaa9401 100644 --- a/pkg/util/encoding/decimal.go +++ b/pkg/util/encoding/decimal.go @@ -21,7 +21,7 @@ import ( "math/big" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/errors" ) diff --git a/pkg/util/encoding/decimal_test.go b/pkg/util/encoding/decimal_test.go index dfcf0814204c..520c5460a13d 100644 --- a/pkg/util/encoding/decimal_test.go +++ b/pkg/util/encoding/decimal_test.go @@ -17,7 +17,7 @@ import ( "math/rand" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/util/randutil" ) diff --git a/pkg/util/encoding/encoding.go b/pkg/util/encoding/encoding.go index 1295ec2dd5db..634989f3478a 100644 --- a/pkg/util/encoding/encoding.go +++ b/pkg/util/encoding/encoding.go @@ -24,7 +24,7 @@ import ( "unicode/utf8" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/util/bitarray" "github.com/cockroachdb/cockroach/pkg/util/duration" @@ -1906,7 +1906,7 @@ func prettyPrintFirstValue(dir Direction, b []byte) ([]byte, string, error) { build.WriteString("ARRAY[") first := true // Use the array key decoding logic, but instead of calling out - // to DecodeTableKey, just make a recursive call. + // to keyside.Decode, just make a recursive call. for { if len(buf) == 0 { return nil, "", errors.AssertionFailedf("invalid array (unterminated)") } diff --git a/pkg/util/encoding/encoding_test.go b/pkg/util/encoding/encoding_test.go index 57b5ef2893d6..926d4f2f3e93 100644 --- a/pkg/util/encoding/encoding_test.go +++ b/pkg/util/encoding/encoding_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/util/bitarray" diff --git a/pkg/util/hlc/hlc.go b/pkg/util/hlc/hlc.go index 794ec7b753d6..075b461ecbcc 100644 --- a/pkg/util/hlc/hlc.go +++ b/pkg/util/hlc/hlc.go @@ -160,6 +160,16 @@ func (m *HybridManualClock) Increment(nanos int64) { m.mu.Unlock() } +// Forward sets the wall time to the supplied timestamp if this moves the clock +// forward in time. +func (m *HybridManualClock) Forward(nanos int64) { + m.mu.Lock() + if nanos > m.mu.nanos { + m.mu.nanos = nanos + } + m.mu.Unlock() +} + // Pause pauses the hybrid manual clock; the passage of time no longer causes // the clock to tick. Increment can still be used, though.
func (m *HybridManualClock) Pause() { diff --git a/pkg/util/json/BUILD.bazel b/pkg/util/json/BUILD.bazel index 75eee6a369dd..f46173d282b0 100644 --- a/pkg/util/json/BUILD.bazel +++ b/pkg/util/json/BUILD.bazel @@ -25,7 +25,7 @@ go_library( "//pkg/util/encoding", "//pkg/util/syncutil", "//pkg/util/unique", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_cockroachdb_errors//:errors", ], ) @@ -47,7 +47,7 @@ go_test( "//pkg/util/randutil", "//pkg/util/timeutil", "//pkg/util/unique", - "@com_github_cockroachdb_apd_v2//:apd", + "@com_github_cockroachdb_apd_v3//:apd", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/util/json/encode.go b/pkg/util/json/encode.go index 57a36662b2a2..164b354d83a0 100644 --- a/pkg/util/json/encode.go +++ b/pkg/util/json/encode.go @@ -11,7 +11,7 @@ package json import ( - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/errors" ) diff --git a/pkg/util/json/encoded.go b/pkg/util/json/encoded.go index e7c3ffcca41f..c4e04dd5156e 100644 --- a/pkg/util/json/encoded.go +++ b/pkg/util/json/encoded.go @@ -17,7 +17,7 @@ import ( "strconv" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" diff --git a/pkg/util/json/json.go b/pkg/util/json/json.go index 0be61bb64864..798568233f05 100644 --- a/pkg/util/json/json.go +++ b/pkg/util/json/json.go @@ -22,7 +22,7 @@ import ( "unicode/utf8" "unsafe" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -756,8 +756,7 @@ func (jsonFalse) Size() uintptr { return 0 } func (jsonTrue) Size() uintptr { return 0 } func (j jsonNumber) Size() uintptr { - intVal := j.Coeff - return decimalSize + uintptr(cap(intVal.Bits()))*wordSize + return j.Coeff.Size() } func (j jsonString) Size() uintptr { diff --git a/pkg/util/json/json_test.go b/pkg/util/json/json_test.go index 09a571db7f81..4068ca610ade 100644 --- a/pkg/util/json/json_test.go +++ b/pkg/util/json/json_test.go @@ -19,7 +19,7 @@ import ( "strings" "testing" - "github.com/cockroachdb/apd/v2" + "github.com/cockroachdb/apd/v3" "github.com/cockroachdb/cockroach/pkg/sql/inverted" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/util/encoding" diff --git a/pkg/util/mon/bytes_usage.go b/pkg/util/mon/bytes_usage.go index fee9470a8391..1bd7acfe6e20 100644 --- a/pkg/util/mon/bytes_usage.go +++ b/pkg/util/mon/bytes_usage.go @@ -13,6 +13,7 @@ package mon import ( "context" "fmt" + "io" "math" "math/bits" @@ -783,3 +784,57 @@ func (mm *BytesMonitor) adjustBudget(ctx context.Context) { mm.mu.curBudget.Shrink(ctx, mm.mu.curBudget.used-neededBytes) } } + +// ReadAll is like io.ReadAll except that, if acct is non-nil, it additionally +// asks the BoundAccount for permission as it grows its buffer while reading. +// When the caller releases the returned slice, it should shrink the bound +// account by its cap.
+func ReadAll(ctx context.Context, r io.Reader, acct *BoundAccount) ([]byte, error) { + if acct == nil { + b, err := io.ReadAll(r) + return b, err + } + + const starting, maxIncrease = 1024, 8 << 20 + if err := acct.Grow(ctx, starting); err != nil { + return nil, err + } + + b := make([]byte, 0, starting) + + for { + // If we've filled our buffer, ask the monitor for more, up to its cap again + // or max, whichever is less (so we double until we hit 8mb then grow by 8mb + // each time thereafter), then alloc a new buffer that is that much bigger + // and copy the existing buffer over. + if len(b) == cap(b) { + grow := cap(b) + if grow > maxIncrease { + // If we're realloc'ing at the max size it's probably worth checking if + // we've been cancelled too. + if err := ctx.Err(); err != nil { + acct.Shrink(ctx, int64(cap(b))) + return nil, err + } + grow = maxIncrease + } + if err := acct.Grow(ctx, int64(grow)); err != nil { + // We were denied so release whatever we had before returning the error. + acct.Shrink(ctx, int64(cap(b))) + return nil, err + } + realloc := make([]byte, len(b), cap(b)+grow) + copy(realloc, b) + b = realloc + } + + // Read into our buffer until we get an error. + n, err := r.Read(b[len(b):cap(b)]) + b = b[:len(b)+n] + if err != nil { + if err == io.EOF { + err = nil + } + return b, err + } + } +} diff --git a/pkg/workload/schemachange/BUILD.bazel b/pkg/workload/schemachange/BUILD.bazel index c2a882a294be..c3aa6eeaea10 100644 --- a/pkg/workload/schemachange/BUILD.bazel +++ b/pkg/workload/schemachange/BUILD.bazel @@ -16,8 +16,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/security", + "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/colinfo", - "//pkg/sql/catalog/descpb", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", diff --git a/pkg/workload/schemachange/operation_generator.go b/pkg/workload/schemachange/operation_generator.go index 09d0d473ef59..4941d2fb1b86 100644 --- a/pkg/workload/schemachange/operation_generator.go +++ b/pkg/workload/schemachange/operation_generator.go @@ -19,8 +19,8 @@ import ( "sync/atomic" "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/randgen" @@ -513,11 +513,11 @@ func (og *operationGenerator) alterTableLocality(ctx context.Context, tx pgx.Tx) return fmt.Sprintf(`ALTER TABLE %s SET LOCALITY %s`, tableName, toLocality), nil } -func getClusterRegionNames(ctx context.Context, tx pgx.Tx) (descpb.RegionNames, error) { +func getClusterRegionNames(ctx context.Context, tx pgx.Tx) (catpb.RegionNames, error) { return scanRegionNames(ctx, tx, "SELECT region FROM [SHOW REGIONS FROM CLUSTER]") } -func getDatabaseRegionNames(ctx context.Context, tx pgx.Tx) (descpb.RegionNames, error) { +func getDatabaseRegionNames(ctx context.Context, tx pgx.Tx) (catpb.RegionNames, error) { return scanRegionNames(ctx, tx, "SELECT region FROM [SHOW REGIONS FROM DATABASE]") } @@ -528,10 +528,10 @@ func getDatabase(ctx context.Context, tx pgx.Tx) (string, error) { } type getRegionsResult struct { - regionNamesInDatabase descpb.RegionNames - regionNamesInCluster descpb.RegionNames + regionNamesInDatabase catpb.RegionNames + regionNamesInCluster catpb.RegionNames - regionNamesNotInDatabase descpb.RegionNames 
+ regionNamesNotInDatabase catpb.RegionNames } func getRegions(ctx context.Context, tx pgx.Tx) (getRegionsResult, error) { @@ -539,7 +539,7 @@ func getRegions(ctx context.Context, tx pgx.Tx) (getRegionsResult, error) { if err != nil { return getRegionsResult{}, err } - regionNamesNotInDatabaseSet := make(map[descpb.RegionName]struct{}, len(regionNamesInCluster)) + regionNamesNotInDatabaseSet := make(map[catpb.RegionName]struct{}, len(regionNamesInCluster)) for _, clusterRegionName := range regionNamesInCluster { regionNamesNotInDatabaseSet[clusterRegionName] = struct{}{} } @@ -551,7 +551,7 @@ func getRegions(ctx context.Context, tx pgx.Tx) (getRegionsResult, error) { delete(regionNamesNotInDatabaseSet, databaseRegionName) } - regionNamesNotInDatabase := make(descpb.RegionNames, 0, len(regionNamesNotInDatabaseSet)) + regionNamesNotInDatabase := make(catpb.RegionNames, 0, len(regionNamesNotInDatabaseSet)) for regionName := range regionNamesNotInDatabaseSet { regionNamesNotInDatabase = append(regionNamesNotInDatabase, regionName) } @@ -562,8 +562,8 @@ func getRegions(ctx context.Context, tx pgx.Tx) (getRegionsResult, error) { }, nil } -func scanRegionNames(ctx context.Context, tx pgx.Tx, query string) (descpb.RegionNames, error) { - var regionNames descpb.RegionNames +func scanRegionNames(ctx context.Context, tx pgx.Tx, query string) (catpb.RegionNames, error) { + var regionNames catpb.RegionNames rows, err := tx.Query(ctx, query) if err != nil { return nil, err @@ -571,7 +571,7 @@ func scanRegionNames(ctx context.Context, tx pgx.Tx, query string) (descpb.Regio defer rows.Close() for rows.Next() { - var regionName descpb.RegionName + var regionName catpb.RegionName if err := rows.Scan(&regionName); err != nil { return nil, err } diff --git a/scripts/sgrep b/scripts/sgrep new file mode 100755 index 000000000000..c5eabba3a808 --- /dev/null +++ b/scripts/sgrep @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +if [ "$#" -eq 0 ]; then + cat <