68812: randgen: generate random expression indexes r=mgartner a=mgartner

#### randgen: refactor random expression generation

This commit refactors the code that generates random computed columns so
that the logic for generating random expressions can be used in a future
commit to generate random expression indexes.

Release note: None
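
As a rough illustration of the reuse this refactor enables, here is a minimal Go sketch. The names (`column`, `randExpr`, `randComputedColumn`) are hypothetical, not the actual `randgen` API; the point is a single shared expression generator that the computed-column path calls today and that an expression-index generator can call in the follow-up commit.

```go
package main

import (
	"fmt"
	"math/rand"
)

// column is a stand-in for a table column descriptor.
type column struct {
	name string
	typ  string // e.g. "INT" or "STRING"
}

// randExpr returns a random scalar expression referencing one of the given
// columns. The real generator is type-aware and far richer; this only shows
// the shape of a helper that multiple callers can share.
func randExpr(rng *rand.Rand, cols []column) string {
	c := cols[rng.Intn(len(cols))]
	if c.typ == "STRING" {
		return fmt.Sprintf("lower(%s)", c.name)
	}
	return fmt.Sprintf("(%s + %d)", c.name, rng.Intn(10))
}

// randComputedColumn builds a computed-column definition from a random
// expression. An expression-index generator would call the same randExpr.
func randComputedColumn(rng *rand.Rand, cols []column) string {
	return fmt.Sprintf("v AS (%s) STORED", randExpr(rng, cols))
}

func main() {
	rng := rand.New(rand.NewSource(1))
	cols := []column{{name: "a", typ: "INT"}, {name: "s", typ: "STRING"}}
	fmt.Println(randComputedColumn(rng, cols)) // e.g. v AS (lower(s)) STORED
}
```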

#### randgen: generate random expression indexes

The `randgen` package now generates schemas with random expression
indexes. This enables randomized testing of expression indexes via
`sqlsmith` and ternary logic partitioning (TLP).

Fixes #68174

Release note: None
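
For context, the example below shows the shape of index the generated schemas now contain. It is a self-contained Go sketch, not code from this PR: it assumes a locally running CockroachDB node reachable over pgwire and uses the `lib/pq` driver; the table, index name, and connection string are all illustrative.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres-wire driver; CockroachDB speaks pgwire.
)

func main() {
	// Assumes a local single-node, insecure CockroachDB cluster; the
	// connection string is illustrative only.
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// An expression index indexes the value of an expression over each row
	// (here lower(s)) rather than a plain stored column.
	stmts := []string{
		`CREATE TABLE IF NOT EXISTS t (k INT PRIMARY KEY, s STRING)`,
		`CREATE INDEX IF NOT EXISTS t_lower_s_idx ON t ((lower(s)))`,
		`UPSERT INTO t VALUES (1, 'Hello'), (2, 'world')`,
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}

	// Queries that filter on the indexed expression can use the index.
	var n int
	if err := db.QueryRow(
		`SELECT count(*) FROM t WHERE lower(s) = 'hello'`).Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Printf("rows matching lower(s) = 'hello': %d", n)
}
```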


68918: Revert "streamingccl: hang processors on losing connection with sinkless stream client" r=arulajmani a=adityamaru

This reverts commit f5244f4.

68990: roachtest/tests: adjust sqlsmith slightly r=yuzefovich a=yuzefovich

This commit adjusts the `sqlsmith` roachtest slightly so that vectorized
panic injection occurs with 50% probability (instead of 100%). This is
done to check whether the panic injection is the root cause of the inbox
communication errors we have been seeing sporadically.

Informs: #66174.

Release note: None
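
A minimal sketch of the probabilistic gate described above, assuming hypothetical plumbing rather than the actual roachtest code:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	// Previously the injection was unconditional; gating it on a coin flip
	// lets failures be correlated with (or ruled out against) the injection.
	injectPanics := rng.Float64() < 0.5
	if injectPanics {
		// The real test would issue the session setting that enables panic
		// injection in the vectorized engine here (statement elided).
		fmt.Println("this run: vectorized panic injection enabled")
	} else {
		fmt.Println("this run: vectorized panic injection disabled")
	}
}
```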

69003: backupccl: skip TestBackupRestoreSystemJobsProgress under stressrace r=arulajmani a=adityamaru

The test times out under stressrace. It runs without flaking under `stress` after #68961.

Release note: None

Co-authored-by: Marcus Gartner <marcus@cockroachlabs.com>
Co-authored-by: Aditya Maru <adityamaru@gmail.com>
Co-authored-by: Yahor Yuzefovich <yahor@cockroachlabs.com>
4 people committed Aug 16, 2021
5 parents 66c60f2 + 4fd177c + 749c8ee + c987fc9 + a0e8f2c commit 852934c
Showing 10 changed files with 201 additions and 341 deletions.
2 changes: 2 additions & 0 deletions pkg/ccl/backupccl/backup_test.go
@@ -1632,6 +1632,8 @@ func TestBackupRestoreSystemJobsProgress(t *testing.T) {
defer log.Scope(t).Close(t)
defer jobs.TestingSetProgressThresholds()()

skip.UnderStressRace(t, "test takes too long to run under stressrace")

checkFraction := func(ctx context.Context, ip inProgressState) error {
jobID, err := ip.latestJobID()
if err != nil {
5 changes: 0 additions & 5 deletions pkg/ccl/streamingccl/event.go
@@ -116,8 +116,3 @@ func MakeKVEvent(kv roachpb.KeyValue) Event {
func MakeCheckpointEvent(resolvedTimestamp hlc.Timestamp) Event {
return checkpointEvent{resolvedTimestamp: resolvedTimestamp}
}

// MakeGenerationEvent creates an GenerationEvent.
func MakeGenerationEvent() Event {
return generationEvent{}
}
@@ -11,7 +11,6 @@ package streamclient
import (
"context"
gosql "database/sql"
"database/sql/driver"
"fmt"
"strconv"

@@ -125,15 +124,7 @@ func (m *sinklessReplicationClient) ConsumePartition(
}
}
if err := rows.Err(); err != nil {
if errors.Is(err, driver.ErrBadConn) {
select {
case eventCh <- streamingccl.MakeGenerationEvent():
case <-ctx.Done():
errCh <- ctx.Err()
}
} else {
errCh <- err
}
errCh <- err
return
}
}()
@@ -112,17 +112,4 @@ INSERT INTO d.t2 VALUES (2);
feed.ObserveResolved(secondObserved.Value.Timestamp)
cancelIngestion()
})

t.Run("stream-address-disconnects", func(t *testing.T) {
clientCtx, cancelIngestion := context.WithCancel(ctx)
eventCh, errCh, err := client.ConsumePartition(clientCtx, pa, startTime)
require.NoError(t, err)
feedSource := &channelFeedSource{eventCh: eventCh, errCh: errCh}
feed := streamingtest.MakeReplicationFeed(t, feedSource)

h.SysServer.Stopper().Stop(clientCtx)

require.True(t, feed.ObserveGeneration())
cancelIngestion()
})
}
55 changes: 14 additions & 41 deletions pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go
@@ -31,7 +31,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"golang.org/x/sync/errgroup"
@@ -96,6 +95,14 @@ type streamIngestionProcessor struct {
// and have attempted to flush them with `internalDrained`.
internalDrained bool

// ingestionErr stores any error that is returned from the worker goroutine so
// that it can be forwarded through the DistSQL flow.
ingestionErr error

// pollingErr stores any error that is returned from the poller checking for a
// cutover signal so that it can be forwarded through the DistSQL flow.
pollingErr error

// pollingWaitGroup registers the polling goroutine and waits for it to return
// when the processor is being drained.
pollingWaitGroup sync.WaitGroup
@@ -110,20 +117,6 @@ type streamIngestionProcessor struct {
// closePoller is used to shutdown the poller that checks the job for a
// cutover signal.
closePoller chan struct{}

// mu is used to provide thread-safe read-write operations to ingestionErr
// and pollingErr.
mu struct {
syncutil.Mutex

// ingestionErr stores any error that is returned from the worker goroutine so
// that it can be forwarded through the DistSQL flow.
ingestionErr error

// pollingErr stores any error that is returned from the poller checking for a
// cutover signal so that it can be forwarded through the DistSQL flow.
pollingErr error
}
}

// partitionEvent augments a normal event with the partition it came from.
@@ -197,9 +190,7 @@ func (sip *streamIngestionProcessor) Start(ctx context.Context) {
defer sip.pollingWaitGroup.Done()
err := sip.checkForCutoverSignal(ctx, sip.closePoller)
if err != nil {
sip.mu.Lock()
sip.mu.pollingErr = errors.Wrap(err, "error while polling job for cutover signal")
sip.mu.Unlock()
sip.pollingErr = errors.Wrap(err, "error while polling job for cutover signal")
}
}()

@@ -229,11 +220,8 @@ func (sip *streamIngestionProcessor) Next() (rowenc.EncDatumRow, *execinfrapb.Pr
return nil, sip.DrainHelper()
}

sip.mu.Lock()
err := sip.mu.pollingErr
sip.mu.Unlock()
if err != nil {
sip.MoveToDraining(err)
if sip.pollingErr != nil {
sip.MoveToDraining(sip.pollingErr)
return nil, sip.DrainHelper()
}

@@ -255,11 +243,8 @@ func (sip *streamIngestionProcessor) Next() (rowenc.EncDatumRow, *execinfrapb.Pr
return row, nil
}

sip.mu.Lock()
err = sip.mu.ingestionErr
sip.mu.Unlock()
if err != nil {
sip.MoveToDraining(err)
if sip.ingestionErr != nil {
sip.MoveToDraining(sip.ingestionErr)
return nil, sip.DrainHelper()
}

@@ -387,10 +372,7 @@ func (sip *streamIngestionProcessor) merge(
})
}
go func() {
err := g.Wait()
sip.mu.Lock()
defer sip.mu.Unlock()
sip.mu.ingestionErr = err
sip.ingestionErr = g.Wait()
close(merged)
}()

@@ -444,15 +426,6 @@ func (sip *streamIngestionProcessor) consumeEvents() (*jobspb.ResolvedSpans, err
}

return sip.flush()
case streamingccl.GenerationEvent:
log.Info(sip.Ctx, "GenerationEvent received")
select {
case <-sip.cutoverCh:
sip.internalDrained = true
return nil, nil
case <-sip.Ctx.Done():
return nil, sip.Ctx.Err()
}
default:
return nil, errors.Newf("unknown streaming event type %v", event.Type())
}
153 changes: 15 additions & 138 deletions pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go
@@ -12,7 +12,6 @@ import (
"context"
"fmt"
"strconv"
"sync"
"testing"
"time"

@@ -49,20 +48,9 @@ import (
// partition addresses.
type mockStreamClient struct {
partitionEvents map[streamingccl.PartitionAddress][]streamingccl.Event

// mu is used to provide a threadsafe interface to interceptors.
mu struct {
syncutil.Mutex

// interceptors can be registered to peek at every event generated by this
// client.
interceptors []func(streamingccl.Event, streamingccl.PartitionAddress)
tableID int
}
}

var _ streamclient.Client = &mockStreamClient{}
var _ streamclient.InterceptableStreamClient = &mockStreamClient{}

// GetTopology implements the Client interface.
func (m *mockStreamClient) GetTopology(
@@ -73,51 +61,22 @@ func (m *mockStreamClient) GetTopology(

// ConsumePartition implements the Client interface.
func (m *mockStreamClient) ConsumePartition(
ctx context.Context, address streamingccl.PartitionAddress, _ hlc.Timestamp,
_ context.Context, address streamingccl.PartitionAddress, _ hlc.Timestamp,
) (chan streamingccl.Event, chan error, error) {
var events []streamingccl.Event
var ok bool
if events, ok = m.partitionEvents[address]; !ok {
return nil, nil, errors.Newf("no events found for partition %s", address)
}

eventCh := make(chan streamingccl.Event)
errCh := make(chan error)

go func() {
defer close(eventCh)
defer close(errCh)

for _, event := range events {
select {
case eventCh <- event:
case <-ctx.Done():
errCh <- ctx.Err()
}

func() {
m.mu.Lock()
defer m.mu.Unlock()

if len(m.mu.interceptors) > 0 {
for _, interceptor := range m.mu.interceptors {
if interceptor != nil {
interceptor(event, address)
}
}
}
}()
}
}()
eventCh := make(chan streamingccl.Event, len(events))

return eventCh, errCh, nil
}
for _, event := range events {
eventCh <- event
}
close(eventCh)

// RegisterInterception implements the InterceptableStreamClient interface.
func (m *mockStreamClient) RegisterInterception(fn streamclient.InterceptFn) {
m.mu.Lock()
defer m.mu.Unlock()
m.mu.interceptors = append(m.mu.interceptors, fn)
return eventCh, nil, nil
}

// errorStreamClient always returns an error when consuming a partition.
@@ -212,59 +171,6 @@ func TestStreamIngestionProcessor(t *testing.T) {
require.Nil(t, row)
testutils.IsError(meta.Err, "this client always returns an error")
})

t.Run("stream ingestion processor shuts down gracefully on losing client connection", func(t *testing.T) {
events := []streamingccl.Event{streamingccl.MakeGenerationEvent()}
pa := streamingccl.PartitionAddress("partition")
mockClient := &mockStreamClient{
partitionEvents: map[streamingccl.PartitionAddress][]streamingccl.Event{pa: events},
}

startTime := hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}
partitionAddresses := []streamingccl.PartitionAddress{"partition"}

interceptCh := make(chan struct{})
defer close(interceptCh)
sendToInterceptCh := func() {
interceptCh <- struct{}{}
}
interceptGeneration := markGenerationEventReceived(sendToInterceptCh)
sip, out, err := getStreamIngestionProcessor(ctx, t, registry, kvDB, "randomgen://test/",
partitionAddresses, startTime, []streamclient.InterceptFn{interceptGeneration}, mockClient)
require.NoError(t, err)

var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
sip.Run(ctx)
}()

// The channel will block on read if the event has not been intercepted yet.
// Once it unblocks, we are guaranteed that the mockClient has sent the
// GenerationEvent and the processor has read it.
<-interceptCh

// The sip processor has received a GenerationEvent and is thus
// waiting for a cutover signal, so let's send one!
sip.cutoverCh <- struct{}{}

wg.Wait()
// Ensure that all the outputs are properly closed.
if !out.ProducerClosed() {
t.Fatalf("output RowReceiver not closed")
}

for {
// No metadata should have been produced since the processor
// should have been moved to draining state with a nil error.
row := out.NextNoMeta(t)
if row == nil {
break
}
t.Fatalf("more output rows than expected")
}
})
}

func getPartitionSpanToTableID(
@@ -473,30 +379,6 @@ func runStreamIngestionProcessor(
interceptEvents []streamclient.InterceptFn,
mockClient streamclient.Client,
) (*distsqlutils.RowBuffer, error) {
sip, out, err := getStreamIngestionProcessor(ctx, t, registry, kvDB, streamAddr,
partitionAddresses, startTime, interceptEvents, mockClient)
require.NoError(t, err)

sip.Run(ctx)

// Ensure that all the outputs are properly closed.
if !out.ProducerClosed() {
t.Fatalf("output RowReceiver not closed")
}
return out, err
}

func getStreamIngestionProcessor(
ctx context.Context,
t *testing.T,
registry *jobs.Registry,
kvDB *kv.DB,
streamAddr string,
partitionAddresses []streamingccl.PartitionAddress,
startTime hlc.Timestamp,
interceptEvents []streamclient.InterceptFn,
mockClient streamclient.Client,
) (*streamIngestionProcessor, *distsqlutils.RowBuffer, error) {
st := cluster.MakeTestingClusterSettings()
evalCtx := tree.MakeTestingEvalContext(st)

@@ -541,7 +423,14 @@ func getStreamIngestionProcessor(
interceptable.RegisterInterception(interceptor)
}
}
return sip, out, err

sip.Run(ctx)

// Ensure that all the outputs are properly closed.
if !out.ProducerClosed() {
t.Fatalf("output RowReceiver not closed")
}
return out, err
}

func registerValidatorWithClient(
Expand Down Expand Up @@ -587,15 +476,3 @@ func makeCheckpointEventCounter(
}
}
}

// markGenerationEventReceived runs f after seeing a GenerationEvent.
func markGenerationEventReceived(
f func(),
) func(event streamingccl.Event, pa streamingccl.PartitionAddress) {
return func(event streamingccl.Event, pa streamingccl.PartitionAddress) {
switch event.Type() {
case streamingccl.GenerationEvent:
f()
}
}
}