Skip to content

Commit

Permalink
Merge branch 'master' into copylock
Browse files Browse the repository at this point in the history
  • Loading branch information
Groxx committed May 8, 2024
2 parents 758139b + da5107b commit 9d5ab22
Show file tree
Hide file tree
Showing 12 changed files with 2,699 additions and 632 deletions.
2 changes: 1 addition & 1 deletion codecov.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ coverage:
if_ci_failed: ignore # require the CI to pass before setting the status
patch:
default:
target: 0% # specify the target coverage for each commit status
target: 85% # specify the target coverage for each commit status
# option: "auto" (compare against parent commit or pull request base)
# option: "X%" a static target percentage to hit
threshold: 0% # allow the coverage drop by x% before marking as failure
Expand Down
246 changes: 246 additions & 0 deletions common/persistence/nosql/nosqlplugin/cassandra/history_events_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,246 @@
// The MIT License (MIT)

// Copyright (c) 2017-2020 Uber Technologies Inc.

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package cassandra

import (
"context"
"testing"
"time"

"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"

"github.com/uber/cadence/common/persistence/nosql/nosqlplugin"
"github.com/uber/cadence/common/persistence/nosql/nosqlplugin/cassandra/gocql"
"github.com/uber/cadence/common/types"
)

// TestInsertIntoHistoryTreeAndNode drives InsertIntoHistoryTreeAndNode through a
// fake session and checks that errors from the underlying batch execution are
// surfaced (or absent) as each case expects.
func TestInsertIntoHistoryTreeAndNode(t *testing.T) {
	cases := []struct {
		name        string
		treeRow     *nosqlplugin.HistoryTreeRow
		nodeRow     *nosqlplugin.HistoryNodeRow
		setupMocks  func(session *fakeSession)
		expectError bool
	}{
		{
			name: "Successfully insert tree and node row",
			treeRow: &nosqlplugin.HistoryTreeRow{
				TreeID:          "treeID",
				BranchID:        "branchID",
				Ancestors:       []*types.HistoryBranchRange{}, // slice of pointers, matching the row type
				CreateTimestamp: time.Now(),
			},
			nodeRow: &nosqlplugin.HistoryNodeRow{
				TreeID:       "treeID",
				BranchID:     "branchID",
				NodeID:       1,
				Data:         []byte("data"),
				DataEncoding: "encoding",
			},
			setupMocks: func(session *fakeSession) {
				// Simulate a CAS batch that applies cleanly with no error.
				session.mapExecuteBatchCASApplied = true
				session.mapExecuteBatchCASErr = nil
			},
			expectError: false,
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)

			session := &fakeSession{query: gocql.NewMockQuery(ctrl)}
			if setup := tc.setupMocks; setup != nil {
				setup(session)
			}

			db := &cdb{session: session}

			err := db.InsertIntoHistoryTreeAndNode(context.Background(), tc.treeRow, tc.nodeRow)
			if tc.expectError {
				assert.Error(t, err, "Expected an error but got none")
			} else {
				assert.NoError(t, err, "Did not expect an error but got one")
			}
		})
	}
}

// TestSelectFromHistoryNode verifies that SelectFromHistoryNode returns the rows
// and paging token yielded by the query iterator, and that a nil iterator is
// reported as an error.
func TestSelectFromHistoryNode(t *testing.T) {
	txnID1 := int64(1)
	txnID2 := int64(2)
	cases := []struct {
		name          string
		filter        *nosqlplugin.HistoryNodeFilter
		setupMocks    func(*gomock.Controller, *fakeSession)
		expectedRows  []*nosqlplugin.HistoryNodeRow
		expectedToken []byte
		expectError   bool
	}{
		{
			name: "Successfully retrieve history nodes",
			filter: &nosqlplugin.HistoryNodeFilter{
				TreeID:        "treeID",
				BranchID:      "branchID",
				MinNodeID:     1,
				MaxNodeID:     10,
				PageSize:      5,
				NextPageToken: nil,
			},
			setupMocks: func(ctrl *gomock.Controller, session *fakeSession) {
				q := gocql.NewMockQuery(ctrl)
				q.EXPECT().WithContext(gomock.Any()).Return(q).AnyTimes()
				q.EXPECT().PageSize(gomock.Any()).Return(q).AnyTimes()
				q.EXPECT().PageState(gomock.Any()).Return(q).AnyTimes()
				// The fake iterator yields two node rows and a non-empty page state.
				iter := &fakeIter{
					scanInputs: [][]interface{}{
						{int64(1), &txnID1, []byte("data1"), "encoding"},
						{int64(2), &txnID2, []byte("data2"), "encoding"},
					},
					pageState: []byte("nextPageToken"),
				}
				q.EXPECT().Iter().Return(iter).AnyTimes()

				session.query = q
			},
			expectedRows: []*nosqlplugin.HistoryNodeRow{
				{NodeID: int64(1), TxnID: &txnID1, Data: []byte("data1"), DataEncoding: "encoding"},
				{NodeID: int64(2), TxnID: &txnID2, Data: []byte("data2"), DataEncoding: "encoding"},
			},
			expectedToken: []byte("nextPageToken"),
			expectError:   false,
		},
		{
			name: "Failure to create query iterator",
			filter: &nosqlplugin.HistoryNodeFilter{
				TreeID:        "treeID",
				BranchID:      "branchID",
				MinNodeID:     1,
				MaxNodeID:     10,
				PageSize:      5,
				NextPageToken: nil,
			},
			setupMocks: func(ctrl *gomock.Controller, session *fakeSession) {
				q := gocql.NewMockQuery(ctrl)
				q.EXPECT().WithContext(gomock.Any()).Return(q).AnyTimes()
				q.EXPECT().PageSize(gomock.Any()).Return(q).AnyTimes()
				q.EXPECT().PageState(gomock.Any()).Return(q).AnyTimes()
				q.EXPECT().Iter().Return(nil).AnyTimes() // nil iterator simulates failure

				session.query = q
			},
			expectError: true,
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)

			session := &fakeSession{}
			if tc.setupMocks != nil {
				tc.setupMocks(ctrl, session)
			}

			db := &cdb{session: session}
			rows, token, err := db.SelectFromHistoryNode(context.Background(), tc.filter)

			if tc.expectError {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.expectedRows, rows)
			assert.Equal(t, tc.expectedToken, token)
		})
	}
}

// TestDeleteFromHistoryTreeAndNode exercises DeleteFromHistoryTreeAndNode with a
// fake session, checking whether errors are propagated for each scenario.
func TestDeleteFromHistoryTreeAndNode(t *testing.T) {
	tests := []struct {
		name        string
		treeFilter  *nosqlplugin.HistoryTreeFilter
		nodeFilters []*nosqlplugin.HistoryNodeFilter
		setupMocks  func(*fakeSession)
		expectError bool
	}{
		{
			name: "Successfully delete tree and nodes",
			treeFilter: &nosqlplugin.HistoryTreeFilter{
				ShardID:  1,
				TreeID:   "treeID",
				BranchID: stringPtr("branchID"),
			},
			// Two node filters so the delete batch covers multiple node ranges.
			nodeFilters: []*nosqlplugin.HistoryNodeFilter{
				{TreeID: "treeID", BranchID: "branchID", MinNodeID: 1},
				{TreeID: "treeID", BranchID: "branchID", MinNodeID: 2},
			},
			setupMocks: func(session *fakeSession) {
				// Simulate successful batch execution
				session.mapExecuteBatchCASApplied = true
			},
			expectError: false,
		},
		{
			name: "Failure in batch execution",
			treeFilter: &nosqlplugin.HistoryTreeFilter{
				ShardID:  1,
				TreeID:   "treeID",
				BranchID: stringPtr("branchID"),
			},
			nodeFilters: []*nosqlplugin.HistoryNodeFilter{
				{TreeID: "treeID", BranchID: "branchID", MinNodeID: 1},
			},
			setupMocks: func(session *fakeSession) {
				// Simulate failure in batch execution
				// NOTE(review): this sets the CAS-batch error field yet the case still
				// expects success (expectError: false). If DeleteFromHistoryTreeAndNode
				// executes a plain (non-CAS) batch, this field never takes effect and the
				// case does not actually exercise a failure path — confirm against
				// fakeSession and the production code, and either set the matching error
				// field or rename the case.
				session.mapExecuteBatchCASErr = types.InternalServiceError{Message: "DB operation failed"}
			},
			expectError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {

			session := &fakeSession{}
			if tt.setupMocks != nil {
				tt.setupMocks(session)
			}

			db := &cdb{session: session}
			err := db.DeleteFromHistoryTreeAndNode(context.Background(), tt.treeFilter, tt.nodeFilters)

			if tt.expectError {
				assert.Error(t, err, "Expected an error but got none")
			} else {
				assert.NoError(t, err, "Did not expect an error but got one")
			}
		})
	}
}

// stringPtr returns a pointer to a fresh copy of the given string value,
// convenient for populating optional *string fields in test fixtures.
func stringPtr(s string) *string {
	v := s
	return &v
}
18 changes: 13 additions & 5 deletions host/workflowidratelimit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -127,21 +127,29 @@ func (s *WorkflowIDRateLimitIntegrationSuite) TestWorkflowIDSpecificRateLimits()
ctx, cancel := createContext()
defer cancel()

// The ratelimit is 5 per second, so we should be able to start 5 workflows without any error
// The ratelimit is 5 per second with a burst of 5, so we should be able to start 5 workflows without any error
for i := 0; i < 5; i++ {
_, err := s.engine.StartWorkflowExecution(ctx, request)
assert.NoError(s.T(), err)
}

// Now we should get a rate limit error
// Now we should get a rate limit error (with some fuzziness for time passing)
limited := 0
for i := 0; i < 5; i++ {
_, err := s.engine.StartWorkflowExecution(ctx, request)
var busyErr *types.ServiceBusyError
assert.ErrorAs(s.T(), err, &busyErr)
assert.Equal(s.T(), common.WorkflowIDRateLimitReason, busyErr.Reason)
if err != nil {
if assert.ErrorAs(s.T(), err, &busyErr) {
limited++
assert.Equal(s.T(), common.WorkflowIDRateLimitReason, busyErr.Reason)
}
}
}
// 5 fails occasionally, trying 4. If needed, reduce to 3 or find a way to
// make this test less sensitive to latency, as test-runner hosts vary a lot.
assert.GreaterOrEqual(s.T(), limited, 4, "should have encountered some rate-limit errors after the burst was exhausted")

// After 1 second we should be able to start another workflow
// After 1 second (200ms at a minimum) we should be able to start more workflows without being limited
time.Sleep(1 * time.Second)
_, err := s.engine.StartWorkflowExecution(ctx, request)
assert.NoError(s.T(), err)
Expand Down
Loading

0 comments on commit 9d5ab22

Please sign in to comment.