42952: pgdate: fix parsing of dates before unix epoch r=otan a=otan

Resolves #42937.

Previously, when pgdate handled dates before the unix epoch that had a time
attached, it rounded up instead of rounding down, meaning a time such
as '1969-12-30 01:00:00' rounded up to days = `-1` when it should be
days = `-2`. This PR fixes that behavior.
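As a standalone sketch of the arithmetic (illustrative only — `floorDays` is
not the actual pgdate code), Go's integer division truncates toward zero, so
negative offsets that aren't a whole number of days need an explicit
adjustment to get floor semantics:

```go
package main

import "fmt"

const secondsPerDay = 24 * 60 * 60

// floorDays is an illustrative helper: Go's integer division truncates
// toward zero, so for negative offsets that are not a whole number of
// days we subtract one to get the floored day count.
func floorDays(sec int64) int64 {
	days := sec / secondsPerDay
	if sec < 0 && sec%secondsPerDay != 0 {
		days--
	}
	return days
}

func main() {
	// '1969-12-30 01:00:00' UTC is -2*86400 + 3600 = -169200 seconds
	// from the unix epoch.
	sec := int64(-169200)
	fmt.Println(sec / secondsPerDay) // -1: truncated toward zero (wrong day)
	fmt.Println(floorDays(sec))      // -2: floored (the correct day)
}
```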

I will probably look at backporting this.

Release note (bug fix): Previously, we did not correctly handle date casts
from timestamp/timestamptz values with a time component for times before the
unix epoch. For example, '1969-12-30 01:00:00'::timestamp would
round to '1969-12-31' instead of '1969-12-30'. This PR fixes that
behavior.

43006: ccl/changefeedccl: respect filesystem walk errors in cloudFeed r=nvanbenschoten a=nvanbenschoten

Closes #42979.

This commit properly handles errors in `cloudFeed`'s `filepath.WalkFunc`
implementation. The documentation says:
> If there was a problem walking to the file or directory named by
> path, the incoming error will describe the problem and the function
> can decide how to handle that error (and Walk will not descend into
> that directory). If an error is returned, processing stops.

I stressed the test for 10,000 iterations and never saw a failure, so it's
possible that this was a fluke. It's not clear what filesystem error was
being thrown, so we might still see this pop up again, but at least we'll now
correctly propagate the error and surface it instead of hitting a nil-pointer
dereference. Because of that, I'm closing the issue for now.
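For illustration, here is a minimal standalone `filepath.Walk` usage that
respects the incoming error the same way (a sketch only — the directory path
is made up, and this is not the changefeed code):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	err := filepath.Walk("/tmp/feed-dir", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// When err is non-nil, info may be nil, so returning
			// early avoids a nil-pointer panic and stops the walk.
			return err
		}
		if info.IsDir() {
			return nil // nothing to do for directories
		}
		fmt.Println("visited:", path)
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
	}
}
```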

Release note: None

43009: row: move cfetcher initialization validation steps outside of loop r=yuzefovich a=rohany

Some initialization checks that the cfetcher performed were occurring
inside a loop for no reason. This PR moves them out of the loop.
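As a toy sketch of the pattern (hypothetical names — `validate` and
`initColumns` are not the actual cfetcher code), hoisting a loop-invariant
check looks like this:

```go
package main

import (
	"errors"
	"fmt"
)

// validate stands in for a per-table check that depends only on the
// descriptor, not on the loop variable.
func validate(desc string) error {
	if desc == "" {
		return errors.New("empty table descriptor")
	}
	return nil
}

func initColumns(desc string, cols []string) error {
	// Run the loop-invariant check once, before the loop, instead of
	// re-running it on every iteration.
	if err := validate(desc); err != nil {
		return err
	}
	for _, col := range cols {
		fmt.Println("initialized column", col)
	}
	return nil
}

func main() {
	if err := initColumns("t1", []string{"a", "b"}); err != nil {
		fmt.Println(err)
	}
}
```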

Release note: None

43010: cmd/roachtest: only log sqlsmith on non-errors r=mjibson a=mjibson

Release note: None

Co-authored-by: Oliver Tan <otan@cockroachlabs.com>
Co-authored-by: Nathan VanBenschoten <nvanbenschoten@gmail.com>
Co-authored-by: Rohan Yadav <rohany@alumni.cmu.edu>
Co-authored-by: Matt Jibson <matt.jibson@gmail.com>
5 people committed Dec 6, 2019
5 parents b5dbca3 + 8e57ced + 0104746 + a6b779c + da0c4f4 commit 28f216e
Showing 6 changed files with 108 additions and 71 deletions.
12 changes: 11 additions & 1 deletion pkg/ccl/changefeedccl/cdctest/testfeed.go
@@ -613,12 +613,22 @@ func (c *cloudFeed) Next() (*TestFeedMessage, error) {
     }
 }
 
-func (c *cloudFeed) walkDir(path string, info os.FileInfo, _ error) error {
+func (c *cloudFeed) walkDir(path string, info os.FileInfo, err error) error {
     if strings.HasSuffix(path, `.tmp`) {
         // File in the process of being written by ExternalStorage. Ignore.
         return nil
     }
 
+    if err != nil {
+        // From filepath.WalkFunc:
+        // If there was a problem walking to the file or directory named by
+        // path, the incoming error will describe the problem and the function
+        // can decide how to handle that error (and Walk will not descend into
+        // that directory). In the case of an error, the info argument will be
+        // nil. If an error is returned, processing stops.
+        return err
+    }
+
     if info.IsDir() {
         // Nothing to do for directories.
         return nil
3 changes: 2 additions & 1 deletion pkg/cmd/roachtest/sqlsmith.go
@@ -100,8 +100,9 @@ func registerSQLSmith(r *testRegistry) {
     c.l.Printf("setup:\n%s", setup)
     if _, err := conn.Exec(setup); err != nil {
         t.Fatal(err)
+    } else {
+        logStmt(setup)
     }
-    logStmt(setup)
 
     const timeout = time.Minute
     setStmtTimeout := fmt.Sprintf("SET statement_timeout='%s';", timeout.String())
114 changes: 57 additions & 57 deletions pkg/sql/colexec/cfetcher.go
@@ -358,78 +358,78 @@ func (rf *cFetcher) Init(
             return errors.AssertionFailedf("needed column %d not in colIdxMap", id)
         }
     }
+    }
 
-        // - If there are interleaves, we need to read the index key in order to
-        // determine whether this row is actually part of the index we're scanning.
-        // - If there are needed columns from the index key, we need to read it.
-        //
-        // Otherwise, we can completely avoid decoding the index key.
-        if neededIndexCols > 0 || len(table.index.InterleavedBy) > 0 || len(table.index.Interleave.Ancestors) > 0 {
-            rf.mustDecodeIndexKey = true
-        }
+    // - If there are interleaves, we need to read the index key in order to
+    // determine whether this row is actually part of the index we're scanning.
+    // - If there are needed columns from the index key, we need to read it.
+    //
+    // Otherwise, we can completely avoid decoding the index key.
+    if neededIndexCols > 0 || len(table.index.InterleavedBy) > 0 || len(table.index.Interleave.Ancestors) > 0 {
+        rf.mustDecodeIndexKey = true
+    }
 
-        if table.isSecondaryIndex {
-            for i := range table.cols {
-                if neededCols.Contains(int(table.cols[i].ID)) && !table.index.ContainsColumnID(table.cols[i].ID) {
-                    return errors.Errorf("requested column %s not in index", table.cols[i].Name)
-                }
-            }
-        }
+    if table.isSecondaryIndex {
+        for i := range table.cols {
+            if neededCols.Contains(int(table.cols[i].ID)) && !table.index.ContainsColumnID(table.cols[i].ID) {
+                return errors.Errorf("requested column %s not in index", table.cols[i].Name)
+            }
+        }
+    }
 
-        // Prepare our index key vals slice.
-        table.keyValTypes, err = sqlbase.GetColumnTypes(table.desc.TableDesc(), indexColumnIDs)
-        if err != nil {
-            return err
-        }
+    // Prepare our index key vals slice.
+    table.keyValTypes, err = sqlbase.GetColumnTypes(table.desc.TableDesc(), indexColumnIDs)
+    if err != nil {
+        return err
+    }
 
-        if cHasExtraCols(&table) {
-            // Unique secondary indexes have a value that is the
-            // primary index key.
-            // Primary indexes only contain ascendingly-encoded
-            // values. If this ever changes, we'll probably have to
-            // figure out the directions here too.
-            table.extraTypes, err = sqlbase.GetColumnTypes(table.desc.TableDesc(), table.index.ExtraColumnIDs)
-            nExtraColumns := len(table.index.ExtraColumnIDs)
-            if cap(table.extraValColOrdinals) >= nExtraColumns {
-                table.extraValColOrdinals = table.extraValColOrdinals[:nExtraColumns]
-            } else {
-                table.extraValColOrdinals = make([]int, nExtraColumns)
-            }
+    if cHasExtraCols(&table) {
+        // Unique secondary indexes have a value that is the
+        // primary index key.
+        // Primary indexes only contain ascendingly-encoded
+        // values. If this ever changes, we'll probably have to
+        // figure out the directions here too.
+        table.extraTypes, err = sqlbase.GetColumnTypes(table.desc.TableDesc(), table.index.ExtraColumnIDs)
+        nExtraColumns := len(table.index.ExtraColumnIDs)
+        if cap(table.extraValColOrdinals) >= nExtraColumns {
+            table.extraValColOrdinals = table.extraValColOrdinals[:nExtraColumns]
+        } else {
+            table.extraValColOrdinals = make([]int, nExtraColumns)
+        }
 
-            if cap(table.allExtraValColOrdinals) >= nExtraColumns {
-                table.allExtraValColOrdinals = table.allExtraValColOrdinals[:nExtraColumns]
-            } else {
-                table.allExtraValColOrdinals = make([]int, nExtraColumns)
-            }
+        if cap(table.allExtraValColOrdinals) >= nExtraColumns {
+            table.allExtraValColOrdinals = table.allExtraValColOrdinals[:nExtraColumns]
+        } else {
+            table.allExtraValColOrdinals = make([]int, nExtraColumns)
+        }
 
-            for i, id := range table.index.ExtraColumnIDs {
-                table.allExtraValColOrdinals[i] = tableArgs.ColIdxMap[id]
-                if neededCols.Contains(int(id)) {
-                    table.extraValColOrdinals[i] = tableArgs.ColIdxMap[id]
-                } else {
-                    table.extraValColOrdinals[i] = -1
-                }
-            }
-            if err != nil {
-                return err
-            }
-        }
+        for i, id := range table.index.ExtraColumnIDs {
+            table.allExtraValColOrdinals[i] = tableArgs.ColIdxMap[id]
+            if neededCols.Contains(int(id)) {
+                table.extraValColOrdinals[i] = tableArgs.ColIdxMap[id]
+            } else {
+                table.extraValColOrdinals[i] = -1
+            }
+        }
+        if err != nil {
+            return err
+        }
+    }
 
-        // Keep track of the maximum keys per row to accommodate a
-        // limitHint when StartScan is invoked.
-        if keysPerRow := table.desc.KeysPerRow(table.index.ID); keysPerRow > rf.maxKeysPerRow {
-            rf.maxKeysPerRow = keysPerRow
-        }
+    // Keep track of the maximum keys per row to accommodate a
+    // limitHint when StartScan is invoked.
+    if keysPerRow := table.desc.KeysPerRow(table.index.ID); keysPerRow > rf.maxKeysPerRow {
+        rf.maxKeysPerRow = keysPerRow
+    }
 
-        for i := range table.desc.Families {
-            id := table.desc.Families[i].ID
-            if id > table.maxColumnFamilyID {
-                table.maxColumnFamilyID = id
-            }
-        }
+    for i := range table.desc.Families {
+        id := table.desc.Families[i].ID
+        if id > table.maxColumnFamilyID {
+            table.maxColumnFamilyID = id
+        }
+    }
 
-        rf.table = table
-    }
+    rf.table = table
 
     return nil
 }
5 changes: 5 additions & 0 deletions pkg/sql/sem/tree/testdata/eval/cast
@@ -434,6 +434,11 @@ eval
 ----
 '2010-09-28'
 
+eval
+'1969-12-30 01:00:00'::timestamp::date
+----
+'1969-12-30'
+
 eval
 time '12:00:00'
 ----
8 changes: 8 additions & 0 deletions pkg/util/timeutil/pgdate/pgdate.go
@@ -100,7 +100,15 @@ func MakeDateFromTime(t time.Time) (Date, error) {
     sec := t.Unix()
     _, offset := t.Zone()
     sec += int64(offset)
+
     days := sec / secondsPerDay
+    if sec < 0 && sec%secondsPerDay != 0 {
+        // If days are negative AND not divisible by secondsPerDay,
+        // we need to round down.
+        // e.g. for 1969-12-30 01:00, the division will round to -1
+        // but we want -2.
+        days--
+    }
     d, err := MakeDateFromUnixEpoch(days)
     return d, err
 }
37 changes: 25 additions & 12 deletions pkg/util/timeutil/pgdate/pgdate_test.go
@@ -18,7 +18,7 @@ import (
     "time"
 )
 
-func TestDateFromTime(t *testing.T) {
+func TestParseDate(t *testing.T) {
     for _, tc := range []struct {
         s   string
         err string
@@ -116,24 +116,37 @@ func TestMakeCompatibleDateFromDisk(t *testing.T) {
 }
 
 func TestMakeDateFromTime(t *testing.T) {
+    pgEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
+    pgEpochWithHourOffset := time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)
+    // These dates are negative, which makes rounding a little different.
+    dayBeforeUnixEpoch := time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC)
+    dayBeforeUnixEpochWithHourOffset := time.Date(1969, 12, 31, 1, 0, 0, 0, time.UTC)
+    twoDaysBeforeUnixEpoch := time.Date(1969, 12, 30, 0, 0, 0, 0, time.UTC)
+    twoDaysBeforeUnixEpochWithHourOffset := time.Date(1969, 12, 30, 1, 0, 0, 0, time.UTC)
+
     for _, tc := range []struct {
-        loc *time.Location
+        in  time.Time
         out string
     }{
-        {time.FixedZone("secsPerDay", secondsPerDay), "2000-01-02"},
-        {time.FixedZone("secsPerDay-1", secondsPerDay-1), "2000-01-01"},
-        {time.FixedZone("1", 1), "2000-01-01"},
-        {time.UTC, "2000-01-01"},
-        {time.FixedZone("-1", -1), "1999-12-31"},
-        {time.FixedZone("-secsPerDay", -secondsPerDay), "1999-12-31"},
+        {pgEpoch.In(time.FixedZone("secsPerDay", secondsPerDay)), "2000-01-02"},
+        {pgEpoch.In(time.FixedZone("secsPerDay-1", secondsPerDay-1)), "2000-01-01"},
+        {pgEpoch.In(time.FixedZone("1", 1)), "2000-01-01"},
+        {pgEpoch, "2000-01-01"},
+        {pgEpoch.In(time.FixedZone("-1", -1)), "1999-12-31"},
+        {pgEpoch.In(time.FixedZone("-secsPerDay", -secondsPerDay)), "1999-12-31"},
+        {pgEpochWithHourOffset, "2000-01-01"},
+
+        {dayBeforeUnixEpoch, "1969-12-31"},
+        {dayBeforeUnixEpochWithHourOffset, "1969-12-31"},
+        {twoDaysBeforeUnixEpoch, "1969-12-30"},
+        {twoDaysBeforeUnixEpochWithHourOffset, "1969-12-30"},
     } {
-        t.Run(tc.loc.String(), func(t *testing.T) {
-            tm := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).In(tc.loc)
-            d, err := MakeDateFromTime(tm)
+        t.Run(tc.in.Format(time.RFC3339), func(t *testing.T) {
+            d, err := MakeDateFromTime(tc.in)
             if err != nil {
                 t.Fatal(err)
             }
-            exp := tm.Format("2006-01-02")
+            exp := tc.in.Format("2006-01-02")
             // Sanity check our tests.
             if exp != tc.out {
                 t.Fatalf("got %s, expected %s", exp, tc.out)
