Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
43023: sql: acceptance test and test cleanup for TimeTZ r=otan a=otan

Resolves #26097.

This PR completes the TimeTZ saga!

* Added Java unit tests
* Removed some tests from the test whitelist
* Added postgres regress suite.

Fix the parse error to use New instead of Wrap, as wrapping actually
makes the error message more confusing.

Release note (sql change): This PR (along with a string of past PRs)
allows the usage of TimeTZ throughout cockroach.

43208: blobs: Reduce file size of benchmark tests r=g3orgia a=g3orgia

As titled.

Release note: None

43221: backupccl: change the error code for "file already exists" errors r=andreimatei a=andreimatei

The backup code used to use a class 58 error code ("system error") for
situations where a backup target already exists - DuplicateFile. Class
58 is the wrong one, particularly since we've started using 58 errors to
represent errors about the state of the cluster (range unavailable,
dropped connections). So clients should treat 58 errors as retriable
(and for example the scaledata tests do).  This patch switches to a new
code in "Class 42 - Syntax or Access Rule Violation".

It's hard to imagine that Postgres returns the 58 code for anything
related to user input.

Release note (sql change): The error code for backups which would
overwrite files changed from class 58 ("system") to class 42 ("Syntax or
Access Rule Violation").

43240: storage/engine: small logging fixes r=petermattis a=petermattis

Change `Pebble.GetCompactionStats` to be prefixed with a newline to
match the formatting of RocksDB. This ensures that the compaction stats
display will not contain the log prefix which was misaligning the table
header.

Add a missing sort to `Pebble.GetSSTables`. This was causing the
sstable summary log message to be much busier than for RocksDB because
`SSTableInfos.String` expects the infos to be sorted.

Move the formatting of `estimated_pending_compaction_bytes: x` into
`RocksDB.GetCompactionStats`. The Pebble compaction stats already
included this and it is useful to see the estimated pending compaction
bytes whenever the compaction stats are output.

Release note: None

Co-authored-by: Oliver Tan <otan@cockroachlabs.com>
Co-authored-by: Georgia Hong <georgiah@cockroachlabs.com>
Co-authored-by: Andrei Matei <andrei@cockroachlabs.com>
Co-authored-by: Peter Mattis <petermattis@gmail.com>
  • Loading branch information
5 people committed Dec 17, 2019
5 parents 20e2f03 + ebd4b84 + 710fa23 + 431d18d + e5ed1be commit 00a58ee
Show file tree
Hide file tree
Showing 9 changed files with 149 additions and 12 deletions.
9 changes: 9 additions & 0 deletions pkg/acceptance/testdata/java/src/main/java/MainTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,15 @@ public void testTime() throws Exception {
Assert.assertEquals("01:02:03.456", actual);
}

@Test
public void testTimeTZ() throws Exception {
    // Evaluate a TIMETZ cast server-side and format the returned JDBC Time.
    // NOTE(review): the expected "+0000" suffix assumes the JVM default time
    // zone is UTC — confirm the acceptance-test harness pins the time zone.
    PreparedStatement statement = conn.prepareStatement("SELECT '01:02:03.456-07:00'::TIMETZ");
    ResultSet resultSet = statement.executeQuery();
    resultSet.next();
    String formatted = new SimpleDateFormat("HH:mm:ss.SSSZ").format(resultSet.getTime(1));
    Assert.assertEquals("08:02:03.456+0000", formatted);
}

@Test
public void testUUID() throws Exception {
UUID uuid = UUID.randomUUID();
Expand Down
8 changes: 6 additions & 2 deletions pkg/blobs/bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// filesize is the size of the file exercised by the streaming read/write
// benchmarks. For meaningful benchmark numbers it should be at least
// 1 GB (1 << 30); it is reduced to 129 KiB (129 * 1 << 10 bytes) here so
// the benchmarks stay fast enough to run in CI.
const filesize = 129 * 1 << 10

type benchmarkTestCase struct {
localNodeID roachpb.NodeID
remoteNodeID roachpb.NodeID
Expand Down Expand Up @@ -68,7 +72,7 @@ func BenchmarkStreamingReadFile(b *testing.B) {
localExternalDir: localExternalDir,
remoteExternalDir: remoteExternalDir,
blobClient: blobClient,
fileSize: 1 << 30, // 1 GB
fileSize: filesize,
fileName: "test/largefile.csv",
}
benchmarkStreamingReadFile(b, params)
Expand Down Expand Up @@ -119,7 +123,7 @@ func BenchmarkStreamingWriteFile(b *testing.B) {
localExternalDir: localExternalDir,
remoteExternalDir: remoteExternalDir,
blobClient: blobClient,
fileSize: 1 << 30, // 1 GB
fileSize: filesize,
fileName: "test/largefile.csv",
}
benchmarkStreamingWriteFile(b, params)
Expand Down
6 changes: 3 additions & 3 deletions pkg/ccl/backupccl/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -938,21 +938,21 @@ func VerifyUsableExportTarget(
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
return pgerror.Newf(pgcode.FileAlreadyExists,
"%s already contains a %s file",
readable, BackupDescriptorName)
}
if r, err := exportStore.ReadFile(ctx, BackupManifestName); err == nil {
// TODO(dt): If we audit exactly what not-exists error each ExternalStorage
// returns (and then wrap/tag them), we could narrow this check.
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
return pgerror.Newf(pgcode.FileAlreadyExists,
"%s already contains a %s file",
readable, BackupManifestName)
}
if r, err := exportStore.ReadFile(ctx, BackupDescriptorCheckpointName); err == nil {
r.Close()
return pgerror.Newf(pgcode.DuplicateFile,
return pgerror.Newf(pgcode.FileAlreadyExists,
"%s already contains a %s file (is another operation already in progress?)",
readable, BackupDescriptorCheckpointName)
}
Expand Down
113 changes: 113 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/timetz
Original file line number Diff line number Diff line change
Expand Up @@ -526,3 +526,116 @@ query R
SELECT extract(epoch from timetz '12:00:00+04')
----
28800

# Adapted from `src/test/regress/expected/timetz.out` in postgres
subtest regress_postgres

statement ok
CREATE TABLE TIMETZ_TBL (id serial primary key, f1 time(2) with time zone)

# Changed PDT/PST/EDT -> zone offsets, as pgdate does not support abbreviations.
statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('00:01-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('01:00-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('02:03-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('07:07-05')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('08:08-04')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('11:59-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('12:00-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('12:01-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('23:59-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('11:59:59.99 PM-07')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('2003-03-07 15:36:39 America/New_York')

statement ok
INSERT INTO TIMETZ_TBL (f1) VALUES ('2003-07-07 15:36:39 America/New_York')

# pgdate supports this, but postgres does not.
# INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 America/New_York')

# this should fail (timezone not specified without a date)
query error could not parse "1970-01-01 15:36:39 m2" as TimeTZ
INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 m2')

# this should fail (dynamic timezone abbreviation without a date)
query error could not parse "1970-01-01 15:36:39 MSK m2" as TimeTZ
INSERT INTO TIMETZ_TBL (f1) VALUES ('15:36:39 MSK m2')

query T
SELECT f1::string AS "Time TZ" FROM TIMETZ_TBL ORDER BY id
----
00:01:00-07:00:00
01:00:00-07:00:00
02:03:00-07:00:00
07:07:00-05:00:00
08:08:00-04:00:00
11:59:00-07:00:00
12:00:00-07:00:00
12:01:00-07:00:00
23:59:00-07:00:00
23:59:59.99-07:00:00
15:36:39-05:00:00
15:36:39-04:00:00

query T
SELECT f1::string AS "Three" FROM TIMETZ_TBL WHERE f1 < '05:06:07-07' ORDER BY id
----
00:01:00-07:00:00
01:00:00-07:00:00
02:03:00-07:00:00

query T
SELECT f1::string AS "Seven" FROM TIMETZ_TBL WHERE f1 > '05:06:07-07' ORDER BY id
----
07:07:00-05:00:00
08:08:00-04:00:00
11:59:00-07:00:00
12:00:00-07:00:00
12:01:00-07:00:00
23:59:00-07:00:00
23:59:59.99-07:00:00
15:36:39-05:00:00
15:36:39-04:00:00

query T
SELECT f1::string AS "None" FROM TIMETZ_TBL WHERE f1 < '00:00-07' ORDER BY id
----

query T
SELECT f1::string AS "Ten" FROM TIMETZ_TBL WHERE f1 >= '00:00-07' ORDER BY id
----
00:01:00-07:00:00
01:00:00-07:00:00
02:03:00-07:00:00
07:07:00-05:00:00
08:08:00-04:00:00
11:59:00-07:00:00
12:00:00-07:00:00
12:01:00-07:00:00
23:59:00-07:00:00
23:59:59.99-07:00:00
15:36:39-05:00:00
15:36:39-04:00:00

query error pq: unsupported binary operator: <timetz\(2\)> \+ <timetz>
SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL ORDER BY id
1 change: 1 addition & 0 deletions pkg/sql/pgwire/pgcode/codes.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,7 @@ const (
InvalidSchemaDefinition = "42P15"
InvalidTableDefinition = "42P16"
InvalidObjectDefinition = "42P17"
FileAlreadyExists = "42C01"
// Class 44 - WITH CHECK OPTION Violation
WithCheckOptionViolation = "44000"
// Class 53 - Insufficient Resources
Expand Down
8 changes: 7 additions & 1 deletion pkg/storage/engine/pebble.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"io"
"io/ioutil"
"os"
"sort"

"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
Expand Down Expand Up @@ -487,7 +488,10 @@ func (p *Pebble) Get(key MVCCKey) ([]byte, error) {

// GetCompactionStats implements the Engine interface.
func (p *Pebble) GetCompactionStats() string {
	// This method is only used for logging. The leading newline mirrors
	// RocksDB's formatting so the log prefix does not misalign the stats
	// table header.
	metrics := p.db.Metrics().String()
	return "\n" + metrics
}

// GetTickersAndHistograms implements the Engine interface.
Expand Down Expand Up @@ -879,6 +883,8 @@ func (p *Pebble) GetSSTables() (sstables SSTableInfos) {
sstables = append(sstables, info)
}
}

sort.Sort(sstables)
return sstables
}

Expand Down
9 changes: 8 additions & 1 deletion pkg/storage/engine/rocksdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
Expand Down Expand Up @@ -1261,7 +1262,13 @@ func (r *RocksDB) GetTickersAndHistograms() (*enginepb.TickersAndHistograms, err
// GetCompactionStats returns the internal RocksDB compaction stats. See
// https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#rocksdb-statistics.
func (r *RocksDB) GetCompactionStats() string {
	// Append the estimated pending compaction bytes so they are visible
	// whenever the compaction stats are logged, matching Pebble's output.
	prefix := cStringToGoString(C.DBGetCompactionStats(r.rdb)) +
		"estimated_pending_compaction_bytes: "
	stats, err := r.GetStats()
	if err != nil {
		return prefix + err.Error()
	}
	return prefix + humanizeutil.IBytes(stats.PendingCompactionBytesEstimate)
}

// GetEnvStats returns stats for the RocksDB env. This may include encryption stats.
Expand Down
4 changes: 1 addition & 3 deletions pkg/storage/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/limit"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
Expand Down Expand Up @@ -2354,8 +2353,7 @@ func (s *Store) ComputeMetrics(ctx context.Context, tick int) error {
// stats.
if tick%logSSTInfoTicks == 1 /* every 10m */ {
log.Infof(ctx, "sstables (read amplification = %d):\n%s", readAmp, sstables)
log.Infof(ctx, "%sestimated_pending_compaction_bytes: %s",
s.engine.GetCompactionStats(), humanizeutil.IBytes(stats.PendingCompactionBytesEstimate))
log.Infof(ctx, "%s", s.engine.GetCompactionStats())
}
return nil
}
Expand Down
3 changes: 1 addition & 2 deletions pkg/util/timetz/timetz.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,8 +104,7 @@ func ParseTimeTZ(now time.Time, s string, precision time.Duration) (TimeTZ, erro
t, err := pgdate.ParseTimestamp(now, pgdate.ParseModeYMD, s)
if err != nil {
// Build our own error message to avoid exposing the dummy date.
return TimeTZ{}, pgerror.Wrapf(
err,
return TimeTZ{}, pgerror.Newf(
pgcode.InvalidTextRepresentation,
"could not parse %q as TimeTZ",
s,
Expand Down

0 comments on commit 00a58ee

Please sign in to comment.