diff --git a/Makefile b/Makefile index 0d02189f508b..f221901a7422 100644 --- a/Makefile +++ b/Makefile @@ -108,7 +108,10 @@ pd-server-basic: .PHONY: pre-build build tools pd-server pd-server-basic # Tools - +pd-ut: pd-xprog failpoint-enable + cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ut pd-ut/main.go +pd-xprog: + cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/xprog pd-xprog/main.go pd-ctl: cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ctl pd-ctl/main.go pd-tso-bench: @@ -303,6 +306,8 @@ split: clean: failpoint-disable clean-test clean-build +CLEAN_UT_BINARY := find . -name '*.test.bin'| xargs rm -f + clean-test: # Cleaning test tmp... rm -rf /tmp/test_pd* @@ -310,6 +315,7 @@ clean-test: rm -rf /tmp/test_etcd* rm -f $(REAL_CLUSTER_TEST_PATH)/playground.log go clean -testcache + @$(CLEAN_UT_BINARY) clean-build: # Cleaning building files... diff --git a/client/resource_group/controller/util_test.go b/client/resource_group/controller/util_test.go index a89ea08b955c..10fa7c345a50 100644 --- a/client/resource_group/controller/util_test.go +++ b/client/resource_group/controller/util_test.go @@ -27,7 +27,6 @@ type example struct { } func TestDurationJSON(t *testing.T) { - t.Parallel() re := require.New(t) example := &example{} @@ -41,7 +40,6 @@ func TestDurationJSON(t *testing.T) { } func TestDurationTOML(t *testing.T) { - t.Parallel() re := require.New(t) example := &example{} diff --git a/pkg/audit/audit_test.go b/pkg/audit/audit_test.go index 8098b36975ee..9066d81ebe36 100644 --- a/pkg/audit/audit_test.go +++ b/pkg/audit/audit_test.go @@ -32,7 +32,6 @@ import ( ) func TestLabelMatcher(t *testing.T) { - t.Parallel() re := require.New(t) matcher := &LabelMatcher{"testSuccess"} labels1 := &BackendLabels{Labels: []string{"testFail", "testSuccess"}} @@ -42,7 +41,6 @@ func TestLabelMatcher(t *testing.T) { } func TestPrometheusHistogramBackend(t *testing.T) { - t.Parallel() re := require.New(t) serviceAuditHistogramTest := prometheus.NewHistogramVec( prometheus.HistogramOpts{ @@ -90,7 +88,6 @@ func TestPrometheusHistogramBackend(t *testing.T) { } func TestLocalLogBackendUsingFile(t *testing.T) { - t.Parallel() re := require.New(t) backend := NewLocalLogBackend(true) fname := testutil.InitTempFileLogger("info") diff --git a/pkg/autoscaling/calculation_test.go b/pkg/autoscaling/calculation_test.go index 05a348af59de..5d0c2ba11267 100644 --- a/pkg/autoscaling/calculation_test.go +++ b/pkg/autoscaling/calculation_test.go @@ -29,7 +29,6 @@ import ( ) func TestGetScaledTiKVGroups(t *testing.T) { - t.Parallel() re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -214,7 +213,6 @@ func (q *mockQuerier) Query(options *QueryOptions) (QueryResult, error) { } func TestGetTotalCPUUseTime(t *testing.T) { - t.Parallel() re := require.New(t) querier := &mockQuerier{} instances := []instance{ @@ -237,7 +235,6 @@ func TestGetTotalCPUUseTime(t *testing.T) { } func TestGetTotalCPUQuota(t *testing.T) { - t.Parallel() re := require.New(t) querier := &mockQuerier{} instances := []instance{ @@ -260,7 +257,6 @@ func TestGetTotalCPUQuota(t *testing.T) { } func TestScaleOutGroupLabel(t *testing.T) { - t.Parallel() re := require.New(t) var 
jsonStr = []byte(` { @@ -303,7 +299,6 @@ func TestScaleOutGroupLabel(t *testing.T) { } func TestStrategyChangeCount(t *testing.T) { - t.Parallel() re := require.New(t) var count uint64 = 2 strategy := &Strategy{ diff --git a/pkg/autoscaling/prometheus_test.go b/pkg/autoscaling/prometheus_test.go index b4cf9aefd918..3ee6cb94e37a 100644 --- a/pkg/autoscaling/prometheus_test.go +++ b/pkg/autoscaling/prometheus_test.go @@ -180,7 +180,6 @@ func (c *normalClient) Do(_ context.Context, req *http.Request) (response *http. } func TestRetrieveCPUMetrics(t *testing.T) { - t.Parallel() re := require.New(t) client := &normalClient{ mockData: make(map[string]*response), @@ -225,7 +224,6 @@ func (c *emptyResponseClient) Do(_ context.Context, req *http.Request) (r *http. } func TestEmptyResponse(t *testing.T) { - t.Parallel() re := require.New(t) client := &emptyResponseClient{} querier := NewPrometheusQuerier(client) @@ -253,7 +251,6 @@ func (c *errorHTTPStatusClient) Do(_ context.Context, req *http.Request) (r *htt } func TestErrorHTTPStatus(t *testing.T) { - t.Parallel() re := require.New(t) client := &errorHTTPStatusClient{} querier := NewPrometheusQuerier(client) @@ -279,7 +276,6 @@ func (c *errorPrometheusStatusClient) Do(_ context.Context, req *http.Request) ( } func TestErrorPrometheusStatus(t *testing.T) { - t.Parallel() re := require.New(t) client := &errorPrometheusStatusClient{} querier := NewPrometheusQuerier(client) @@ -290,7 +286,6 @@ func TestErrorPrometheusStatus(t *testing.T) { } func TestGetInstanceNameFromAddress(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { address string @@ -328,7 +323,6 @@ func TestGetInstanceNameFromAddress(t *testing.T) { } func TestGetDurationExpression(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { duration time.Duration diff --git a/pkg/balancer/balancer_test.go b/pkg/balancer/balancer_test.go index 996b4f1da357..2c760c6220c3 100644 --- a/pkg/balancer/balancer_test.go +++ b/pkg/balancer/balancer_test.go @@ -22,7 +22,6 @@ import ( ) func TestBalancerPutAndDelete(t *testing.T) { - t.Parallel() re := require.New(t) balancers := []Balancer[uint32]{ NewRoundRobin[uint32](), @@ -56,7 +55,6 @@ func TestBalancerPutAndDelete(t *testing.T) { } func TestBalancerDuplicate(t *testing.T) { - t.Parallel() re := require.New(t) balancers := []Balancer[uint32]{ NewRoundRobin[uint32](), @@ -77,7 +75,6 @@ func TestBalancerDuplicate(t *testing.T) { } func TestRoundRobin(t *testing.T) { - t.Parallel() re := require.New(t) balancer := NewRoundRobin[uint32]() for i := 0; i < 100; i++ { diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index dbef41d27548..43e97dfa2b0e 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -25,7 +25,6 @@ import ( ) func TestExpireRegionCache(t *testing.T) { - t.Parallel() re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -121,7 +120,6 @@ func sortIDs(ids []uint64) []uint64 { } func TestLRUCache(t *testing.T) { - t.Parallel() re := require.New(t) cache := newLRU(3) @@ -199,7 +197,6 @@ func TestLRUCache(t *testing.T) { } func TestFifoCache(t *testing.T) { - t.Parallel() re := require.New(t) cache := NewFIFO(3) cache.Put(1, "1") @@ -227,7 +224,6 @@ func TestFifoCache(t *testing.T) { } func TestFifoFromLastSameElems(t *testing.T) { - t.Parallel() re := require.New(t) type testStruct struct { value string @@ -260,7 +256,6 @@ func TestFifoFromLastSameElems(t *testing.T) { } func TestTwoQueueCache(t *testing.T) { - t.Parallel() re 
:= require.New(t) cache := newTwoQueue(3) cache.Put(1, "1") @@ -345,7 +340,6 @@ func (pq PriorityQueueItemTest) ID() uint64 { } func TestPriorityQueue(t *testing.T) { - t.Parallel() re := require.New(t) testData := []PriorityQueueItemTest{0, 1, 2, 3, 4, 5} pq := NewPriorityQueue(0) diff --git a/pkg/codec/codec_test.go b/pkg/codec/codec_test.go index f734d2e528e2..50bf552a60dd 100644 --- a/pkg/codec/codec_test.go +++ b/pkg/codec/codec_test.go @@ -21,7 +21,6 @@ import ( ) func TestDecodeBytes(t *testing.T) { - t.Parallel() re := require.New(t) key := "abcdefghijklmnopqrstuvwxyz" for i := 0; i < len(key); i++ { @@ -32,7 +31,6 @@ func TestDecodeBytes(t *testing.T) { } func TestTableID(t *testing.T) { - t.Parallel() re := require.New(t) key := EncodeBytes([]byte("t\x80\x00\x00\x00\x00\x00\x00\xff")) re.Equal(int64(0xff), key.TableID()) diff --git a/pkg/core/rangetree/range_tree_test.go b/pkg/core/rangetree/range_tree_test.go index 0664a7bdbefe..6955947cb1bc 100644 --- a/pkg/core/rangetree/range_tree_test.go +++ b/pkg/core/rangetree/range_tree_test.go @@ -85,7 +85,6 @@ func bucketDebrisFactory(startKey, endKey []byte, item RangeItem) []RangeItem { } func TestRingPutItem(t *testing.T) { - t.Parallel() re := require.New(t) bucketTree := NewRangeTree(2, bucketDebrisFactory) bucketTree.Update(newSimpleBucketItem([]byte("002"), []byte("100"))) @@ -120,7 +119,6 @@ func TestRingPutItem(t *testing.T) { } func TestDebris(t *testing.T) { - t.Parallel() re := require.New(t) ringItem := newSimpleBucketItem([]byte("010"), []byte("090")) var overlaps []RangeItem diff --git a/pkg/core/storelimit/limit_test.go b/pkg/core/storelimit/limit_test.go index 758653303114..e11618767a12 100644 --- a/pkg/core/storelimit/limit_test.go +++ b/pkg/core/storelimit/limit_test.go @@ -45,7 +45,6 @@ func TestStoreLimit(t *testing.T) { } func TestSlidingWindow(t *testing.T) { - t.Parallel() re := require.New(t) capacity := int64(defaultWindowSize) s := NewSlidingWindows() @@ -92,7 +91,6 @@ func TestSlidingWindow(t *testing.T) { } func TestWindow(t *testing.T) { - t.Parallel() re := require.New(t) capacity := int64(100 * 10) s := newWindow(capacity) diff --git a/pkg/encryption/config_test.go b/pkg/encryption/config_test.go index 6f7e4a41b034..4134d46c2f3b 100644 --- a/pkg/encryption/config_test.go +++ b/pkg/encryption/config_test.go @@ -23,7 +23,6 @@ import ( ) func TestAdjustDefaultValue(t *testing.T) { - t.Parallel() re := require.New(t) config := &Config{} err := config.Adjust() @@ -35,21 +34,18 @@ func TestAdjustDefaultValue(t *testing.T) { } func TestAdjustInvalidDataEncryptionMethod(t *testing.T) { - t.Parallel() re := require.New(t) config := &Config{DataEncryptionMethod: "unknown"} re.Error(config.Adjust()) } func TestAdjustNegativeRotationDuration(t *testing.T) { - t.Parallel() re := require.New(t) config := &Config{DataKeyRotationPeriod: typeutil.NewDuration(time.Duration(int64(-1)))} re.Error(config.Adjust()) } func TestAdjustInvalidMasterKeyType(t *testing.T) { - t.Parallel() re := require.New(t) config := &Config{MasterKey: MasterKeyConfig{Type: "unknown"}} re.Error(config.Adjust()) diff --git a/pkg/encryption/crypter_test.go b/pkg/encryption/crypter_test.go index 12a851d15639..9ac72bd78138 100644 --- a/pkg/encryption/crypter_test.go +++ b/pkg/encryption/crypter_test.go @@ -24,7 +24,6 @@ import ( ) func TestEncryptionMethodSupported(t *testing.T) { - t.Parallel() re := require.New(t) re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_PLAINTEXT)) 
re.Error(CheckEncryptionMethodSupported(encryptionpb.EncryptionMethod_UNKNOWN)) @@ -34,7 +33,6 @@ func TestEncryptionMethodSupported(t *testing.T) { } func TestKeyLength(t *testing.T) { - t.Parallel() re := require.New(t) _, err := KeyLength(encryptionpb.EncryptionMethod_PLAINTEXT) re.Error(err) @@ -52,7 +50,6 @@ func TestKeyLength(t *testing.T) { } func TestNewIv(t *testing.T) { - t.Parallel() re := require.New(t) ivCtr, err := NewIvCTR() re.NoError(err) @@ -63,7 +60,6 @@ func TestNewIv(t *testing.T) { } func TestNewDataKey(t *testing.T) { - t.Parallel() re := require.New(t) for _, method := range []encryptionpb.EncryptionMethod{ encryptionpb.EncryptionMethod_AES128_CTR, @@ -82,7 +78,6 @@ func TestNewDataKey(t *testing.T) { } func TestAesGcmCrypter(t *testing.T) { - t.Parallel() re := require.New(t) key, err := hex.DecodeString("ed568fbd8c8018ed2d042a4e5d38d6341486922d401d2022fb81e47c900d3f07") re.NoError(err) diff --git a/pkg/encryption/master_key_test.go b/pkg/encryption/master_key_test.go index 4bc08dab7a50..31962e9e99d7 100644 --- a/pkg/encryption/master_key_test.go +++ b/pkg/encryption/master_key_test.go @@ -24,7 +24,6 @@ import ( ) func TestPlaintextMasterKey(t *testing.T) { - t.Parallel() re := require.New(t) config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_Plaintext{ @@ -50,7 +49,6 @@ func TestPlaintextMasterKey(t *testing.T) { } func TestEncrypt(t *testing.T) { - t.Parallel() re := require.New(t) keyHex := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101 key, err := hex.DecodeString(keyHex) @@ -66,7 +64,6 @@ func TestEncrypt(t *testing.T) { } func TestDecrypt(t *testing.T) { - t.Parallel() re := require.New(t) keyHex := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101 key, err := hex.DecodeString(keyHex) @@ -83,7 +80,6 @@ func TestDecrypt(t *testing.T) { } func TestNewFileMasterKeyMissingPath(t *testing.T) { - t.Parallel() re := require.New(t) config := &encryptionpb.MasterKey{ Backend: &encryptionpb.MasterKey_File{ @@ -97,7 +93,6 @@ func TestNewFileMasterKeyMissingPath(t *testing.T) { } func TestNewFileMasterKeyMissingFile(t *testing.T) { - t.Parallel() re := require.New(t) dir := t.TempDir() path := dir + "/key" @@ -113,7 +108,6 @@ func TestNewFileMasterKeyMissingFile(t *testing.T) { } func TestNewFileMasterKeyNotHexString(t *testing.T) { - t.Parallel() re := require.New(t) dir := t.TempDir() path := dir + "/key" @@ -130,7 +124,6 @@ func TestNewFileMasterKeyNotHexString(t *testing.T) { } func TestNewFileMasterKeyLengthMismatch(t *testing.T) { - t.Parallel() re := require.New(t) dir := t.TempDir() path := dir + "/key" @@ -147,7 +140,6 @@ func TestNewFileMasterKeyLengthMismatch(t *testing.T) { } func TestNewFileMasterKey(t *testing.T) { - t.Parallel() re := require.New(t) key := "2f07ec61e5a50284f47f2b402a962ec672e500b26cb3aa568bb1531300c74806" // #nosec G101 dir := t.TempDir() diff --git a/pkg/encryption/region_crypter_test.go b/pkg/encryption/region_crypter_test.go index 5fd9778a8c0e..b1ca558063c1 100644 --- a/pkg/encryption/region_crypter_test.go +++ b/pkg/encryption/region_crypter_test.go @@ -70,7 +70,6 @@ func (m *testKeyManager) GetKey(keyID uint64) (*encryptionpb.DataKey, error) { } func TestNilRegion(t *testing.T) { - t.Parallel() re := require.New(t) m := newTestKeyManager() region, err := EncryptRegion(nil, m) @@ -81,7 +80,6 @@ func TestNilRegion(t *testing.T) { } func TestEncryptRegionWithoutKeyManager(t *testing.T) { - t.Parallel() re := require.New(t) region := &metapb.Region{ Id: 
10, @@ -98,7 +96,6 @@ func TestEncryptRegionWithoutKeyManager(t *testing.T) { } func TestEncryptRegionWhileEncryptionDisabled(t *testing.T) { - t.Parallel() re := require.New(t) region := &metapb.Region{ Id: 10, @@ -117,7 +114,6 @@ func TestEncryptRegionWhileEncryptionDisabled(t *testing.T) { } func TestEncryptRegion(t *testing.T) { - t.Parallel() re := require.New(t) startKey := []byte("abc") endKey := []byte("xyz") @@ -152,7 +148,6 @@ func TestEncryptRegion(t *testing.T) { } func TestDecryptRegionNotEncrypted(t *testing.T) { - t.Parallel() re := require.New(t) region := &metapb.Region{ Id: 10, @@ -170,7 +165,6 @@ func TestDecryptRegionNotEncrypted(t *testing.T) { } func TestDecryptRegionWithoutKeyManager(t *testing.T) { - t.Parallel() re := require.New(t) region := &metapb.Region{ Id: 10, @@ -186,7 +180,6 @@ func TestDecryptRegionWithoutKeyManager(t *testing.T) { } func TestDecryptRegionWhileKeyMissing(t *testing.T) { - t.Parallel() re := require.New(t) keyID := uint64(3) m := newTestKeyManager() @@ -207,7 +200,6 @@ func TestDecryptRegionWhileKeyMissing(t *testing.T) { } func TestDecryptRegion(t *testing.T) { - t.Parallel() re := require.New(t) keyID := uint64(1) startKey := []byte("abc") diff --git a/pkg/errs/errs_test.go b/pkg/errs/errs_test.go index d76c02dc1109..1dcabc32d9a0 100644 --- a/pkg/errs/errs_test.go +++ b/pkg/errs/errs_test.go @@ -97,7 +97,6 @@ func TestError(t *testing.T) { } func TestErrorEqual(t *testing.T) { - t.Parallel() re := require.New(t) err1 := ErrSchedulerNotFound.FastGenByArgs() err2 := ErrSchedulerNotFound.FastGenByArgs() @@ -134,7 +133,6 @@ func TestZapError(t *testing.T) { } func TestErrorWithStack(t *testing.T) { - t.Parallel() re := require.New(t) conf := &log.Config{Level: "debug", File: log.FileLogConfig{}, DisableTimestamp: true} lg := newZapTestLogger(conf) diff --git a/pkg/mcs/discovery/register_test.go b/pkg/mcs/discovery/register_test.go index 032b0558a79a..0b7cf105f691 100644 --- a/pkg/mcs/discovery/register_test.go +++ b/pkg/mcs/discovery/register_test.go @@ -16,6 +16,8 @@ package discovery import ( "context" + "os" + "strings" "testing" "time" @@ -59,10 +61,17 @@ func TestRegister(t *testing.T) { sr = NewServiceRegister(context.Background(), client, "12345", "test_service", "127.0.0.1:2", "127.0.0.1:2", 1) err = sr.Register() re.NoError(err) + fname := testutil.InitTempFileLogger("info") for i := 0; i < 3; i++ { re.Equal("127.0.0.1:2", getKeyAfterLeaseExpired(re, client, sr.key)) - etcd.Server.HardStop() // close the etcd to make the keepalive failed - time.Sleep(etcdutil.DefaultDialTimeout) // ensure that the request is timeout + etcd.Server.HardStop() // close the etcd to make the keepalive failed + // ensure that the request times out + testutil.Eventually(re, func() bool { + b, _ := os.ReadFile(fname) + l := string(b) + count := strings.Count(l, "keep alive failed") + return count >= i+1 + }) etcd.Close() etcd, err = embed.StartEtcd(&cfg) re.NoError(err) diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index d8a8dd3e6096..11546618ae97 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -254,3 +254,32 @@ func (rw *Watcher) Close() { rw.cancel() rw.wg.Wait() } + +// NewWatcherForTest creates a Watcher for test purposes only; it starts only the region label watcher. +func NewWatcherForTest( + ctx context.Context, + etcdClient *clientv3.Client, + clusterID uint64, + ruleStorage endpoint.RuleStorage, + checkerController *checker.Controller, + regionLabeler *labeler.RegionLabeler, +) error { + ctx, cancel :=
context.WithCancel(ctx) + rw := &Watcher{ + ctx: ctx, + cancel: cancel, + rulesPathPrefix: endpoint.RulesPathPrefix(clusterID), + ruleCommonPathPrefix: endpoint.RuleCommonPathPrefix(clusterID), + ruleGroupPathPrefix: endpoint.RuleGroupPathPrefix(clusterID), + regionLabelPathPrefix: endpoint.RegionLabelPathPrefix(clusterID), + etcdClient: etcdClient, + ruleStorage: ruleStorage, + checkerController: checkerController, + regionLabeler: regionLabeler, + } + err := rw.initializeRegionLabelWatcher() + if err != nil { + return err + } + return nil +} diff --git a/pkg/mcs/scheduling/server/rule/watcher_test.go b/pkg/mcs/scheduling/server/rule/watcher_test.go index 37fce0a0ded3..e47550da2c5c 100644 --- a/pkg/mcs/scheduling/server/rule/watcher_test.go +++ b/pkg/mcs/scheduling/server/rule/watcher_test.go @@ -19,7 +19,6 @@ import ( "encoding/json" "os" "strconv" - "testing" "time" "github.com/stretchr/testify/require" @@ -37,24 +36,24 @@ const ( rulesNum = 16384 ) -func TestLoadLargeRules(t *testing.T) { - re := require.New(t) - ctx, client, clean := prepare(t) - defer clean() - runWatcherLoadLabelRule(ctx, re, client) -} - -func BenchmarkLoadLargeRules(b *testing.B) { - re := require.New(b) - ctx, client, clean := prepare(b) - defer clean() - - b.ResetTimer() // Resets the timer to ignore initialization time in the benchmark - - for n := 0; n < b.N; n++ { - runWatcherLoadLabelRule(ctx, re, client) - } -} +//func TestLoadLargeRules(t *testing.T) { +// re := require.New(t) +// ctx, client, clean := prepare(t) +// defer clean() +// runWatcherLoadLabelRule(ctx, re, client) +//} +// +//func BenchmarkLoadLargeRules(b *testing.B) { +// re := require.New(b) +// ctx, client, clean := prepare(b) +// defer clean() +// +// b.ResetTimer() // Resets the timer to ignore initialization time in the benchmark +// +// for n := 0; n < b.N; n++ { +// runWatcherLoadLabelRule(ctx, re, client) +// } +//} func runWatcherLoadLabelRule(ctx context.Context, re *require.Assertions, client *clientv3.Client) { storage := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) diff --git a/pkg/mock/mockhbstream/mockhbstream_test.go b/pkg/mock/mockhbstream/mockhbstream_test.go index a8e88f61aee6..aa1ca85279b4 100644 --- a/pkg/mock/mockhbstream/mockhbstream_test.go +++ b/pkg/mock/mockhbstream/mockhbstream_test.go @@ -29,7 +29,6 @@ import ( ) func TestActivity(t *testing.T) { - t.Parallel() re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/pkg/movingaverage/avg_over_time_test.go b/pkg/movingaverage/avg_over_time_test.go index 43553d9d6088..4a54e33d449b 100644 --- a/pkg/movingaverage/avg_over_time_test.go +++ b/pkg/movingaverage/avg_over_time_test.go @@ -23,7 +23,6 @@ import ( ) func TestPulse(t *testing.T) { - t.Parallel() re := require.New(t) aot := NewAvgOverTime(5 * time.Second) // warm up @@ -43,7 +42,6 @@ func TestPulse(t *testing.T) { } func TestPulse2(t *testing.T) { - t.Parallel() re := require.New(t) dur := 5 * time.Second aot := NewAvgOverTime(dur) @@ -57,7 +55,6 @@ func TestPulse2(t *testing.T) { } func TestChange(t *testing.T) { - t.Parallel() re := require.New(t) aot := NewAvgOverTime(5 * time.Second) @@ -91,7 +88,6 @@ func TestChange(t *testing.T) { } func TestMinFilled(t *testing.T) { - t.Parallel() re := require.New(t) interval := 10 * time.Second rate := 1.0 @@ -108,7 +104,6 @@ func TestMinFilled(t *testing.T) { } func TestUnstableInterval(t *testing.T) { - t.Parallel() re := require.New(t) aot := NewAvgOverTime(5 * time.Second) re.Equal(0., aot.Get()) diff --git 
a/pkg/movingaverage/max_filter_test.go b/pkg/movingaverage/max_filter_test.go index bba770cecc23..7d3906ec93c5 100644 --- a/pkg/movingaverage/max_filter_test.go +++ b/pkg/movingaverage/max_filter_test.go @@ -21,7 +21,6 @@ import ( ) func TestMaxFilter(t *testing.T) { - t.Parallel() re := require.New(t) var empty float64 = 0 data := []float64{2, 1, 3, 4, 1, 1, 3, 3, 2, 0, 5} diff --git a/pkg/movingaverage/moving_average_test.go b/pkg/movingaverage/moving_average_test.go index 49c20637c209..fd0a1a9fcf36 100644 --- a/pkg/movingaverage/moving_average_test.go +++ b/pkg/movingaverage/moving_average_test.go @@ -72,7 +72,6 @@ func checkInstantaneous(re *require.Assertions, ma MovingAvg) { } func TestMedianFilter(t *testing.T) { - t.Parallel() re := require.New(t) var empty float64 = 0 data := []float64{2, 4, 2, 800, 600, 6, 3} @@ -92,7 +91,6 @@ type testCase struct { } func TestMovingAvg(t *testing.T) { - t.Parallel() re := require.New(t) var empty float64 = 0 data := []float64{1, 1, 1, 1, 5, 1, 1, 1} diff --git a/pkg/movingaverage/weight_allocator_test.go b/pkg/movingaverage/weight_allocator_test.go index 631a71f10c95..405d8f728762 100644 --- a/pkg/movingaverage/weight_allocator_test.go +++ b/pkg/movingaverage/weight_allocator_test.go @@ -21,7 +21,6 @@ import ( ) func TestWeightAllocator(t *testing.T) { - t.Parallel() re := require.New(t) checkSumFunc := func(wa *WeightAllocator, length int) { diff --git a/pkg/progress/progress_test.go b/pkg/progress/progress_test.go index 8f8b0ebcb565..9ea1ccc93b5a 100644 --- a/pkg/progress/progress_test.go +++ b/pkg/progress/progress_test.go @@ -24,7 +24,6 @@ import ( ) func TestProgress(t *testing.T) { - t.Parallel() re := require.New(t) n := "test" m := NewManager() @@ -70,7 +69,6 @@ func TestProgress(t *testing.T) { } func TestAbnormal(t *testing.T) { - t.Parallel() re := require.New(t) n := "test" m := NewManager() diff --git a/pkg/ratelimit/concurrency_limiter_test.go b/pkg/ratelimit/concurrency_limiter_test.go index 5fe03740394a..20658e719e1b 100644 --- a/pkg/ratelimit/concurrency_limiter_test.go +++ b/pkg/ratelimit/concurrency_limiter_test.go @@ -21,7 +21,6 @@ import ( ) func TestConcurrencyLimiter(t *testing.T) { - t.Parallel() re := require.New(t) cl := newConcurrencyLimiter(10) for i := 0; i < 10; i++ { diff --git a/pkg/ratelimit/controller_test.go b/pkg/ratelimit/controller_test.go index 48a5ee2054bc..50be24540e4a 100644 --- a/pkg/ratelimit/controller_test.go +++ b/pkg/ratelimit/controller_test.go @@ -78,7 +78,6 @@ func runMulitLabelLimiter(t *testing.T, limiter *Controller, testCase []labelCas } func TestControllerWithConcurrencyLimiter(t *testing.T) { - t.Parallel() re := require.New(t) limiter := NewController(context.Background(), "grpc", nil) defer limiter.Close() @@ -191,7 +190,6 @@ func TestControllerWithConcurrencyLimiter(t *testing.T) { } func TestBlockList(t *testing.T) { - t.Parallel() re := require.New(t) opts := []Option{AddLabelAllowList()} limiter := NewController(context.Background(), "grpc", nil) @@ -213,7 +211,6 @@ func TestBlockList(t *testing.T) { } func TestControllerWithQPSLimiter(t *testing.T) { - t.Parallel() re := require.New(t) limiter := NewController(context.Background(), "grpc", nil) defer limiter.Close() @@ -323,7 +320,6 @@ func TestControllerWithQPSLimiter(t *testing.T) { } func TestControllerWithTwoLimiters(t *testing.T) { - t.Parallel() re := require.New(t) limiter := NewController(context.Background(), "grpc", nil) defer limiter.Close() diff --git a/pkg/ratelimit/limiter_test.go b/pkg/ratelimit/limiter_test.go index 
fabb9d989172..36f339b47ac0 100644 --- a/pkg/ratelimit/limiter_test.go +++ b/pkg/ratelimit/limiter_test.go @@ -40,7 +40,6 @@ func (r *releaseUtil) append(d DoneFunc) { } func TestWithConcurrencyLimiter(t *testing.T) { - t.Parallel() re := require.New(t) limiter := newLimiter() @@ -103,7 +102,6 @@ func TestWithConcurrencyLimiter(t *testing.T) { } func TestWithQPSLimiter(t *testing.T) { - t.Parallel() re := require.New(t) limiter := newLimiter() status := limiter.updateQPSConfig(float64(rate.Every(time.Second)), 1) @@ -177,7 +175,6 @@ func TestWithQPSLimiter(t *testing.T) { } func TestWithTwoLimiters(t *testing.T) { - t.Parallel() re := require.New(t) cfg := &DimensionConfig{ QPS: 100, diff --git a/pkg/ratelimit/ratelimiter_test.go b/pkg/ratelimit/ratelimiter_test.go index 35b355e7b210..f16bb6a83d28 100644 --- a/pkg/ratelimit/ratelimiter_test.go +++ b/pkg/ratelimit/ratelimiter_test.go @@ -22,7 +22,6 @@ import ( ) func TestRateLimiter(t *testing.T) { - t.Parallel() re := require.New(t) limiter := NewRateLimiter(100, 100) diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index 910a5558eb3a..364f79b7a146 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/testutil" ) func TestAdjustRule(t *testing.T) { @@ -382,8 +383,10 @@ func TestLabelerRuleTTL(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/labeler/regionLabelExpireSub1Minute", "return(true)")) // rule2 should expire and only 2 labels left. - labels := labeler.GetRegionLabels(region) - re.Len(labels, 2) + testutil.Eventually(re, func() bool { + labels := labeler.GetRegionLabels(region) + return len(labels) == 2 + }) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/labeler/regionLabelExpireSub1Minute")) // rule2 should be existed since `GetRegionLabels` won't clear it physically. 
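Note on the TestLabelerRuleTTL change above: the immediate length assertion is replaced with testutil.Eventually, which keeps polling GetRegionLabels until the injected expiration actually takes effect, avoiding a timing-dependent failure. For readers unfamiliar with that helper, here is a minimal sketch of such a polling assertion built on testify's require package (the package name, tick, and timeout below are illustrative assumptions; pd's real helper lives in pkg/utils/testutil and may accept extra tuning options):

// eventually_sketch.go: a rough stand-in for testutil.Eventually, not pd's actual code.
package sketch

import (
	"time"

	"github.com/stretchr/testify/require"
)

// Eventually re-evaluates condition every tick and fails the test if it never
// returns true before the timeout elapses.
func Eventually(re *require.Assertions, condition func() bool) {
	const (
		tick    = 100 * time.Millisecond // assumed polling interval
		timeout = 20 * time.Second       // assumed overall deadline
	)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if condition() {
			return
		}
		time.Sleep(tick)
	}
	re.Fail("condition was not met before the timeout")
}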
diff --git a/pkg/slice/slice_test.go b/pkg/slice/slice_test.go index 1fe3fe79dcf7..5b4f4ca94adc 100644 --- a/pkg/slice/slice_test.go +++ b/pkg/slice/slice_test.go @@ -22,7 +22,6 @@ import ( ) func TestSlice(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { a []int @@ -45,7 +44,7 @@ func TestSlice(t *testing.T) { } func TestSliceContains(t *testing.T) { - t.Parallel() + re := require.New(t) ss := []string{"a", "b", "c"} re.True(slice.Contains(ss, "a")) @@ -61,7 +60,6 @@ func TestSliceContains(t *testing.T) { } func TestSliceRemoveGenericTypes(t *testing.T) { - t.Parallel() re := require.New(t) ss := []string{"a", "b", "c"} ss = slice.Remove(ss, "a") @@ -77,7 +75,6 @@ func TestSliceRemoveGenericTypes(t *testing.T) { } func TestSliceRemove(t *testing.T) { - t.Parallel() re := require.New(t) is := []int64{} diff --git a/pkg/utils/apiutil/apiutil_test.go b/pkg/utils/apiutil/apiutil_test.go index 106d3fb21cbb..aee21621dd23 100644 --- a/pkg/utils/apiutil/apiutil_test.go +++ b/pkg/utils/apiutil/apiutil_test.go @@ -26,7 +26,6 @@ import ( ) func TestJsonRespondErrorOk(t *testing.T) { - t.Parallel() re := require.New(t) rd := render.New(render.Options{ IndentJSON: true, @@ -45,7 +44,6 @@ func TestJsonRespondErrorOk(t *testing.T) { } func TestJsonRespondErrorBadInput(t *testing.T) { - t.Parallel() re := require.New(t) rd := render.New(render.Options{ IndentJSON: true, @@ -71,7 +69,6 @@ func TestJsonRespondErrorBadInput(t *testing.T) { } func TestGetIPPortFromHTTPRequest(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { diff --git a/pkg/utils/assertutil/assertutil_test.go b/pkg/utils/assertutil/assertutil_test.go index 84bd21cef059..076cdd2ac934 100644 --- a/pkg/utils/assertutil/assertutil_test.go +++ b/pkg/utils/assertutil/assertutil_test.go @@ -22,7 +22,6 @@ import ( ) func TestNilFail(t *testing.T) { - t.Parallel() re := require.New(t) var failErr error checker := NewChecker() diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index 4fb968959429..7a9cc25a44bc 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -584,7 +584,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLargeKey() { ctx, cancel := context.WithCancel(suite.ctx) defer cancel() - // crate data + // create data var wg sync.WaitGroup tasks := make(chan int, count) for w := 0; w < 16; w++ { diff --git a/pkg/utils/grpcutil/grpcutil_test.go b/pkg/utils/grpcutil/grpcutil_test.go index 21b7e1a4acbf..2cbff4f3ebc1 100644 --- a/pkg/utils/grpcutil/grpcutil_test.go +++ b/pkg/utils/grpcutil/grpcutil_test.go @@ -37,7 +37,6 @@ func TestToTLSConfig(t *testing.T) { } }() - t.Parallel() re := require.New(t) tlsConfig := TLSConfig{ KeyPath: path.Join(certPath, "pd-server-key.pem"), diff --git a/pkg/utils/jsonutil/jsonutil_test.go b/pkg/utils/jsonutil/jsonutil_test.go index a046fbaf70a4..1e8c21917bab 100644 --- a/pkg/utils/jsonutil/jsonutil_test.go +++ b/pkg/utils/jsonutil/jsonutil_test.go @@ -31,7 +31,6 @@ type testJSONStructLevel2 struct { } func TestJSONUtil(t *testing.T) { - t.Parallel() re := require.New(t) father := &testJSONStructLevel1{ Name: "father", diff --git a/pkg/utils/keyutil/util_test.go b/pkg/utils/keyutil/util_test.go index 374faa1f7973..7bcb0a49c6ff 100644 --- a/pkg/utils/keyutil/util_test.go +++ b/pkg/utils/keyutil/util_test.go @@ -21,7 +21,6 @@ import ( ) func TestKeyUtil(t *testing.T) { - t.Parallel() re := require.New(t) startKey := []byte("a") endKey := []byte("b") diff --git
a/pkg/utils/logutil/log_test.go b/pkg/utils/logutil/log_test.go index 7d4be7a88bd9..650ba62fe9d9 100644 --- a/pkg/utils/logutil/log_test.go +++ b/pkg/utils/logutil/log_test.go @@ -23,7 +23,6 @@ import ( ) func TestStringToZapLogLevel(t *testing.T) { - t.Parallel() re := require.New(t) re.Equal(zapcore.FatalLevel, StringToZapLogLevel("fatal")) re.Equal(zapcore.ErrorLevel, StringToZapLogLevel("ERROR")) @@ -35,7 +34,6 @@ func TestStringToZapLogLevel(t *testing.T) { } func TestRedactLog(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { name string diff --git a/pkg/utils/metricutil/metricutil_test.go b/pkg/utils/metricutil/metricutil_test.go index b817eb0112d4..acac9ce4d495 100644 --- a/pkg/utils/metricutil/metricutil_test.go +++ b/pkg/utils/metricutil/metricutil_test.go @@ -23,7 +23,6 @@ import ( ) func TestCamelCaseToSnakeCase(t *testing.T) { - t.Parallel() re := require.New(t) inputs := []struct { name string diff --git a/pkg/utils/netutil/address_test.go b/pkg/utils/netutil/address_test.go index faa3e2e1d04b..127c9a6d0f7d 100644 --- a/pkg/utils/netutil/address_test.go +++ b/pkg/utils/netutil/address_test.go @@ -22,7 +22,6 @@ import ( ) func TestResolveLoopBackAddr(t *testing.T) { - t.Parallel() re := require.New(t) nodes := []struct { address string @@ -40,7 +39,6 @@ func TestResolveLoopBackAddr(t *testing.T) { } func TestIsEnableHttps(t *testing.T) { - t.Parallel() re := require.New(t) re.False(IsEnableHTTPS(http.DefaultClient)) httpClient := &http.Client{ diff --git a/pkg/utils/reflectutil/tag_test.go b/pkg/utils/reflectutil/tag_test.go index f613f1f81b67..3e49e093912b 100644 --- a/pkg/utils/reflectutil/tag_test.go +++ b/pkg/utils/reflectutil/tag_test.go @@ -35,7 +35,6 @@ type testStruct3 struct { } func TestFindJSONFullTagByChildTag(t *testing.T) { - t.Parallel() re := require.New(t) key := "enable" result := FindJSONFullTagByChildTag(reflect.TypeOf(testStruct1{}), key) @@ -51,7 +50,6 @@ func TestFindJSONFullTagByChildTag(t *testing.T) { } func TestFindSameFieldByJSON(t *testing.T) { - t.Parallel() re := require.New(t) input := map[string]any{ "name": "test2", @@ -65,7 +63,6 @@ func TestFindSameFieldByJSON(t *testing.T) { } func TestFindFieldByJSONTag(t *testing.T) { - t.Parallel() re := require.New(t) t1 := testStruct1{} t2 := testStruct2{} diff --git a/pkg/utils/requestutil/context_test.go b/pkg/utils/requestutil/context_test.go index 298fc1ff8a34..e6bdcd7be46f 100644 --- a/pkg/utils/requestutil/context_test.go +++ b/pkg/utils/requestutil/context_test.go @@ -24,7 +24,6 @@ import ( ) func TestRequestInfo(t *testing.T) { - t.Parallel() re := require.New(t) ctx := context.Background() _, ok := RequestInfoFrom(ctx) @@ -53,7 +52,6 @@ func TestRequestInfo(t *testing.T) { } func TestEndTime(t *testing.T) { - t.Parallel() re := require.New(t) ctx := context.Background() _, ok := EndTimeFrom(ctx) diff --git a/pkg/utils/typeutil/comparison_test.go b/pkg/utils/typeutil/comparison_test.go index b296405b3d51..b53e961b4eea 100644 --- a/pkg/utils/typeutil/comparison_test.go +++ b/pkg/utils/typeutil/comparison_test.go @@ -23,7 +23,6 @@ import ( ) func TestMinUint64(t *testing.T) { - t.Parallel() re := require.New(t) re.Equal(uint64(1), MinUint64(1, 2)) re.Equal(uint64(1), MinUint64(2, 1)) @@ -31,7 +30,6 @@ func TestMinUint64(t *testing.T) { } func TestMaxUint64(t *testing.T) { - t.Parallel() re := require.New(t) re.Equal(uint64(2), MaxUint64(1, 2)) re.Equal(uint64(2), MaxUint64(2, 1)) @@ -39,7 +37,6 @@ func TestMaxUint64(t *testing.T) { } func TestMinDuration(t *testing.T) { 
- t.Parallel() re := require.New(t) re.Equal(time.Second, MinDuration(time.Minute, time.Second)) re.Equal(time.Second, MinDuration(time.Second, time.Minute)) @@ -47,7 +44,6 @@ func TestMinDuration(t *testing.T) { } func TestEqualFloat(t *testing.T) { - t.Parallel() re := require.New(t) f1 := rand.Float64() re.True(Float64Equal(f1, f1*1.000)) @@ -55,7 +51,6 @@ func TestEqualFloat(t *testing.T) { } func TestAreStringSlicesEquivalent(t *testing.T) { - t.Parallel() re := require.New(t) re.True(AreStringSlicesEquivalent(nil, nil)) re.True(AreStringSlicesEquivalent([]string{}, nil)) diff --git a/pkg/utils/typeutil/conversion_test.go b/pkg/utils/typeutil/conversion_test.go index 14b2f9dea522..7b17cfcbe2c1 100644 --- a/pkg/utils/typeutil/conversion_test.go +++ b/pkg/utils/typeutil/conversion_test.go @@ -23,7 +23,6 @@ import ( ) func TestBytesToUint64(t *testing.T) { - t.Parallel() re := require.New(t) str := "\x00\x00\x00\x00\x00\x00\x03\xe8" a, err := BytesToUint64([]byte(str)) @@ -32,7 +31,6 @@ func TestBytesToUint64(t *testing.T) { } func TestUint64ToBytes(t *testing.T) { - t.Parallel() re := require.New(t) var a uint64 = 1000 b := Uint64ToBytes(a) @@ -41,7 +39,6 @@ func TestUint64ToBytes(t *testing.T) { } func TestJSONToUint64Slice(t *testing.T) { - t.Parallel() re := require.New(t) type testArray struct { Array []uint64 `json:"array"` diff --git a/pkg/utils/typeutil/duration_test.go b/pkg/utils/typeutil/duration_test.go index cff7c3cd66cc..42db0be8cdc7 100644 --- a/pkg/utils/typeutil/duration_test.go +++ b/pkg/utils/typeutil/duration_test.go @@ -27,7 +27,6 @@ type example struct { } func TestDurationJSON(t *testing.T) { - t.Parallel() re := require.New(t) example := &example{} @@ -41,7 +40,6 @@ func TestDurationJSON(t *testing.T) { } func TestDurationTOML(t *testing.T) { - t.Parallel() re := require.New(t) example := &example{} diff --git a/pkg/utils/typeutil/size_test.go b/pkg/utils/typeutil/size_test.go index 57c246953e42..f00a584cb17e 100644 --- a/pkg/utils/typeutil/size_test.go +++ b/pkg/utils/typeutil/size_test.go @@ -23,7 +23,7 @@ import ( ) func TestSizeJSON(t *testing.T) { - t.Parallel() + re := require.New(t) b := ByteSize(265421587) o, err := json.Marshal(b) @@ -40,7 +40,6 @@ func TestSizeJSON(t *testing.T) { } func TestParseMbFromText(t *testing.T) { - t.Parallel() re := require.New(t) testCases := []struct { body []string diff --git a/pkg/utils/typeutil/string_slice_test.go b/pkg/utils/typeutil/string_slice_test.go index 9a197eb68e44..9177cee0eb90 100644 --- a/pkg/utils/typeutil/string_slice_test.go +++ b/pkg/utils/typeutil/string_slice_test.go @@ -22,7 +22,6 @@ import ( ) func TestStringSliceJSON(t *testing.T) { - t.Parallel() re := require.New(t) b := StringSlice([]string{"zone", "rack"}) o, err := json.Marshal(b) @@ -36,7 +35,6 @@ func TestStringSliceJSON(t *testing.T) { } func TestEmpty(t *testing.T) { - t.Parallel() re := require.New(t) ss := StringSlice([]string{}) b, err := json.Marshal(ss) diff --git a/pkg/utils/typeutil/time_test.go b/pkg/utils/typeutil/time_test.go index 7a5baf55afad..b8078f63fa8c 100644 --- a/pkg/utils/typeutil/time_test.go +++ b/pkg/utils/typeutil/time_test.go @@ -23,7 +23,6 @@ import ( ) func TestParseTimestamp(t *testing.T) { - t.Parallel() re := require.New(t) for i := 0; i < 3; i++ { t := time.Now().Add(time.Second * time.Duration(rand.Int31n(1000))) @@ -39,7 +38,6 @@ func TestParseTimestamp(t *testing.T) { } func TestSubTimeByWallClock(t *testing.T) { - t.Parallel() re := require.New(t) for i := 0; i < 100; i++ { r := rand.Int63n(1000) @@ -63,7 
+61,6 @@ func TestSubTimeByWallClock(t *testing.T) { } func TestSmallTimeDifference(t *testing.T) { - t.Parallel() re := require.New(t) t1, err := time.Parse("2006-01-02 15:04:05.999", "2021-04-26 00:44:25.682") re.NoError(err) diff --git a/server/api/health_test.go b/server/api/health_test.go index 6d2caec12cd7..89a9627bc377 100644 --- a/server/api/health_test.go +++ b/server/api/health_test.go @@ -26,7 +26,7 @@ import ( ) func checkSliceResponse(re *require.Assertions, body []byte, cfgs []*config.Config, unhealthy string) { - got := []Health{} + var got []Health re.NoError(json.Unmarshal(body, &got)) re.Len(cfgs, len(got)) diff --git a/server/api/member_test.go b/server/api/member_test.go index 65c0ff673608..5d692cda7d92 100644 --- a/server/api/member_test.go +++ b/server/api/member_test.go @@ -158,26 +158,7 @@ func (suite *memberTestSuite) changeLeaderPeerUrls(leader *pdpb.Member, id uint6 resp.Body.Close() } -type resignTestSuite struct { - suite.Suite - cfgs []*config.Config - servers []*server.Server - clean testutil.CleanupFunc -} - -func TestResignTestSuite(t *testing.T) { - suite.Run(t, new(resignTestSuite)) -} - -func (suite *resignTestSuite) SetupSuite() { - suite.cfgs, suite.servers, suite.clean = mustNewCluster(suite.Require(), 1) -} - -func (suite *resignTestSuite) TearDownSuite() { - suite.clean() -} - -func (suite *resignTestSuite) TestResignMyself() { +func (suite *memberTestSuite) TestResignMyself() { re := suite.Require() addr := suite.cfgs[0].ClientUrls + apiPrefix + "/api/v1/leader/resign" resp, err := testDialClient.Post(addr, "", nil) diff --git a/server/server_test.go b/server/server_test.go index 32f5d0646bc7..b2b15962fdcb 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -88,7 +88,7 @@ func (suite *leaderServerTestSuite) TearDownSuite() { } } -func (suite *leaderServerTestSuite) newTestServersWithCfgs( +func newTestServersWithCfgs( ctx context.Context, cfgs []*config.Config, re *require.Assertions, @@ -135,52 +135,6 @@ func (suite *leaderServerTestSuite) newTestServersWithCfgs( return svrs, cleanup } -func (suite *leaderServerTestSuite) TestCheckClusterID() { - re := suite.Require() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - cfgs := NewTestMultiConfig(assertutil.CheckerWithNilAssert(re), 2) - for i, cfg := range cfgs { - cfg.DataDir = fmt.Sprintf("/tmp/test_pd_check_clusterID_%d", i) - // Clean up before testing. - testutil.CleanServer(cfg.DataDir) - } - originInitial := cfgs[0].InitialCluster - for _, cfg := range cfgs { - cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls) - } - - cfgA, cfgB := cfgs[0], cfgs[1] - // Start a standalone cluster. - svrsA, cleanA := suite.newTestServersWithCfgs(ctx, []*config.Config{cfgA}, re) - defer cleanA() - // Close it. - for _, svr := range svrsA { - svr.Close() - } - - // Start another cluster. - _, cleanB := suite.newTestServersWithCfgs(ctx, []*config.Config{cfgB}, re) - defer cleanB() - - // Start previous cluster, expect an error. 
- cfgA.InitialCluster = originInitial - mockHandler := CreateMockHandler(re, "127.0.0.1") - svr, err := CreateServer(ctx, cfgA, nil, mockHandler) - re.NoError(err) - - etcd, err := embed.StartEtcd(svr.etcdCfg) - re.NoError(err) - urlsMap, err := types.NewURLsMap(svr.cfg.InitialCluster) - re.NoError(err) - tlsConfig, err := svr.cfg.Security.ToTLSConfig() - re.NoError(err) - err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlsMap, tlsConfig) - re.Error(err) - etcd.Close() - testutil.CleanServer(cfgA.DataDir) -} - func (suite *leaderServerTestSuite) TestRegisterServerHandler() { re := suite.Require() cfg := NewTestSingleConfig(assertutil.CheckerWithNilAssert(re)) @@ -330,3 +284,49 @@ func TestIsPathInDirectory(t *testing.T) { path = filepath.Join(directory, fileName) re.False(isPathInDirectory(path, directory)) } + +func TestCheckClusterID(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfgs := NewTestMultiConfig(assertutil.CheckerWithNilAssert(re), 2) + for i, cfg := range cfgs { + cfg.DataDir = fmt.Sprintf("/tmp/test_pd_check_clusterID_%d", i) + // Clean up before testing. + testutil.CleanServer(cfg.DataDir) + } + originInitial := cfgs[0].InitialCluster + for _, cfg := range cfgs { + cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, cfg.PeerUrls) + } + + cfgA, cfgB := cfgs[0], cfgs[1] + // Start a standalone cluster. + svrsA, cleanA := newTestServersWithCfgs(ctx, []*config.Config{cfgA}, re) + defer cleanA() + // Close it. + for _, svr := range svrsA { + svr.Close() + } + + // Start another cluster. + _, cleanB := newTestServersWithCfgs(ctx, []*config.Config{cfgB}, re) + defer cleanB() + + // Start previous cluster, expect an error. + cfgA.InitialCluster = originInitial + mockHandler := CreateMockHandler(re, "127.0.0.1") + svr, err := CreateServer(ctx, cfgA, nil, mockHandler) + re.NoError(err) + + etcd, err := embed.StartEtcd(svr.etcdCfg) + re.NoError(err) + urlsMap, err := types.NewURLsMap(svr.cfg.InitialCluster) + re.NoError(err) + tlsConfig, err := svr.cfg.Security.ToTLSConfig() + re.NoError(err) + err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlsMap, tlsConfig) + re.Error(err) + etcd.Close() + testutil.CleanServer(cfgA.DataDir) +} diff --git a/tests/integrations/mcs/scheduling/watcher_test.go b/tests/integrations/mcs/scheduling/watcher_test.go new file mode 100644 index 000000000000..d6d2ada357e7 --- /dev/null +++ b/tests/integrations/mcs/scheduling/watcher_test.go @@ -0,0 +1,103 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduling + +import ( + "context" + "encoding/json" + "github.com/tikv/pd/pkg/mcs/scheduling/server/rule" + "os" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/keyspace" + "github.com/tikv/pd/pkg/schedule/labeler" + "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/storage/kv" + "github.com/tikv/pd/pkg/utils/etcdutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" +) + +const ( + clusterID = uint64(20240117) + rulesNum = 16384 +) + +func TestLoadLargeRules(t *testing.T) { + re := require.New(t) + ctx, client, clean := prepare(t) + defer clean() + runWatcherLoadLabelRule(ctx, re, client) +} + +func BenchmarkLoadLargeRules(b *testing.B) { + re := require.New(b) + ctx, client, clean := prepare(b) + defer clean() + + b.ResetTimer() // Resets the timer to ignore initialization time in the benchmark + + for n := 0; n < b.N; n++ { + runWatcherLoadLabelRule(ctx, re, client) + } +} + +func runWatcherLoadLabelRule(ctx context.Context, re *require.Assertions, client *clientv3.Client) { + storage := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) + labelerManager, err := labeler.NewRegionLabeler(ctx, storage, time.Hour) + re.NoError(err) + ctx, cancel := context.WithCancel(ctx) + err = rule.NewWatcherForTest(ctx, client, clusterID, storage, nil, labelerManager) + re.NoError(err) + re.Len(labelerManager.GetAllLabelRules(), rulesNum) + cancel() +} + +func prepare(t require.TestingT) (context.Context, *clientv3.Client, func()) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + cfg := etcdutil.NewTestSingleConfig() + cfg.Dir = os.TempDir() + "/test_etcd" + os.RemoveAll(cfg.Dir) + etcd, err := embed.StartEtcd(cfg) + re.NoError(err) + client, err := etcdutil.CreateEtcdClient(nil, cfg.ListenClientUrls) + re.NoError(err) + <-etcd.Server.ReadyNotify() + + for i := 1; i < rulesNum+1; i++ { + rule := &labeler.LabelRule{ + ID: "test_" + strconv.Itoa(i), + Labels: []labeler.RegionLabel{{Key: "test", Value: "test"}}, + RuleType: labeler.KeyRange, + Data: keyspace.MakeKeyRanges(uint32(i)), + } + value, err := json.Marshal(rule) + re.NoError(err) + key := endpoint.RegionLabelPathPrefix(clusterID) + "/" + rule.ID + _, err = clientv3.NewKV(client).Put(ctx, key, string(value)) + re.NoError(err) + } + + return ctx, client, func() { + cancel() + client.Close() + etcd.Close() + os.RemoveAll(cfg.Dir) + } +} diff --git a/tools/go.mod b/tools/go.mod index 077f93777289..ea6f57b96e3e 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -31,9 +31,11 @@ require ( github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 go.etcd.io/etcd v0.5.0-alpha.5.0.20240131130919-ff2304879e1b + go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.2.0 go.uber.org/zap v1.26.0 golang.org/x/text v0.14.0 + golang.org/x/tools v0.14.0 google.golang.org/grpc v1.59.0 ) @@ -172,7 +174,6 @@ require ( golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/tools/go.sum b/tools/go.sum index 6b46e5bfbfb6..9432114ee974 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -416,6 +416,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN 
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= @@ -565,6 +567,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/dig v1.9.0 h1:pJTDXKEhRqBI8W7rU7kwT5EgyRZuSMVSFcZolOvKK9U= go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.12.0 h1:+1+3Cz9M0dFMPy9SW9XUIUHye8bnPUm7q7DroNGWYG4= diff --git a/tools/pd-ut/main.go b/tools/pd-ut/main.go new file mode 100644 index 000000000000..111522fbddf6 --- /dev/null +++ b/tools/pd-ut/main.go @@ -0,0 +1,1068 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "bytes" + "encoding/xml" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + "sync" + "time" + + // Set the correct value when it runs inside docker. 
+ _ "go.uber.org/automaxprocs" + "golang.org/x/tools/cover" +) + +func usage() bool { + msg := `// run all tests +ut + +// show usage +ut -h + +// list all packages +ut list + +// list test cases of a single package +ut list $package + +// list test cases that match a pattern +ut list $package 'r:$regex' + +// run all tests +ut run + +// run all test cases of a single package +ut run $package + +// run test cases of a single package +ut run $package $test + +// run test cases that match a pattern +ut run $package 'r:$regex' + +// build all test packages +ut build + +// build a test package +ut build xxx + +// write the junitfile +ut run --junitfile xxx + +// test with race flag +ut run --race + +// test with test.short flag +ut run --short` + + fmt.Println(msg) + return true +} + +const modulePath = "github.com/tikv/pd" + +type task struct { + pkg string + test string +} + +func (t *task) String() string { + return t.pkg + " " + t.test +} + +var p int +var buildParallel int +var workDir string + +func cmdList(args ...string) bool { + pkgs, err := listPackages() + if err != nil { + log.Println("list package error", err) + return false + } + + // list all packages + if len(args) == 0 { + for _, pkg := range pkgs { + fmt.Println(pkg) + } + return false + } + + // list test case of a single package + if len(args) == 1 || len(args) == 2 { + pkg := args[0] + pkgs = filter(pkgs, func(s string) bool { return s == pkg }) + if len(pkgs) != 1 { + fmt.Println("package not exist", pkg) + return false + } + + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in ", pkg) + return false + } + + res, err := listTestCases(pkg, nil) + if err != nil { + log.Println("list test cases for package error", err) + return false + } + + if len(args) == 2 { + res, err = filterTestCases(res, args[1]) + if err != nil { + log.Println("filter test cases error", err) + return false + } + } + + for _, x := range res { + fmt.Println(x.test) + } + } + return true +} + +func cmdBuild(args ...string) bool { + pkgs, err := listPackages() + if err != nil { + log.Println("list package error", err) + return false + } + + // build all packages + if len(args) == 0 { + err := buildTestBinaryMulti(pkgs) + if err != nil { + fmt.Println("build package error", pkgs, err) + return false + } + return true + } + + // build test binary of a single package + if len(args) >= 1 { + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + } + return true +} + +func cmdRun(args ...string) bool { + var err error + pkgs, err := listPackages() + if err != nil { + fmt.Println("list packages error", err) + return false + } + tasks := make([]task, 0, 5000) + start := time.Now() + // run all tests + if len(args) == 0 { + err := buildTestBinaryMulti(pkgs) + if err != nil { + fmt.Println("build package error", pkgs, err) + return false + } + + for _, pkg := range pkgs { + exist, err := testBinaryExist(pkg) + if err != nil { + fmt.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in ", pkg) + continue + } + + tasks, err = listTestCases(pkg, tasks) + if err != nil { + fmt.Println("list test cases error", err) + return false + } + } + } + + // run tests for a single package + if len(args) == 1 { + println("start args 1") + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + + if !exist { + fmt.Println("no test case in ", pkg) + return false + } + tasks, err = listTestCases(pkg, tasks) + if err != nil { + log.Println("list test cases error", err) + return false + } + } + + // run a single test + if len(args) == 2 { + println("start args 2") + pkg := args[0] + err := buildTestBinary(pkg) + if err != nil { + log.Println("build package error", pkg, err) + return false + } + exist, err := testBinaryExist(pkg) + if err != nil { + log.Println("check test binary existence error", err) + return false + } + if !exist { + fmt.Println("no test case in ", pkg) + return false + } + + tasks, err = listTestCases(pkg, tasks) + if err != nil { + log.Println("list test cases error", err) + return false + } + tasks, err = filterTestCases(tasks, args[1]) + if err != nil { + log.Println("filter test cases error", err) + return false + } + } + + if except != "" { + list, err := parseCaseListFromFile(except) + if err != nil { + log.Println("parse --except file error", err) + return false + } + tmp := tasks[:0] + for _, task := range tasks { + if _, ok := list[task.String()]; !ok { + tmp = append(tmp, task) + } + } + tasks = tmp + } + + if only != "" { + list, err := parseCaseListFromFile(only) + if err != nil { + log.Println("parse --only file error", err) + return false + } + tmp := tasks[:0] + for _, task := range tasks { + if _, ok := list[task.String()]; ok { + tmp = append(tmp, task) + } + } + tasks = tmp + } + + fmt.Printf("build tasks finished, parallelism=%d, count=%d, takes=%v\n", buildParallel, len(tasks), time.Since(start)) + + taskCh := make(chan task, 100) + works := make([]numa, p) + var wg sync.WaitGroup + for i := 0; i < p; i++ { + wg.Add(1) + go works[i].worker(&wg, taskCh) + } + + shuffle(tasks) + + start = time.Now() + for _, task := range tasks { + taskCh <- task + } + close(taskCh) + wg.Wait() + fmt.Println("run all tasks takes", time.Since(start)) + + if junitfile != "" { + out := collectTestResults(works) + f, err := os.Create(junitfile) + if err != nil { + fmt.Println("create junit file failed:", err) + return false + } + if err := write(f, out); err != nil { + fmt.Println("write junit file error:", err) + return false + } + } + + if coverprofile != "" { + collectCoverProfileFile() + } + + for _, work := range works { + if work.Fail { + return false + } + } + return true +} + +func parseCaseListFromFile(fileName string) (map[string]struct{}, error) { + ret := make(map[string]struct{}) + + f, err := os.Open(filepath.Clean(fileName)) + if os.IsNotExist(err) { + return ret, nil + } + if err != nil { + return nil, withTrace(err) + } + //nolint: errcheck + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Bytes() + ret[string(line)] = struct{}{} + } + if err := s.Err(); err != nil { + return nil, withTrace(err) + } + return ret, nil +} + +// handleFlags strips the '--flag xxx' from the command line os.Args +// Example of the os.Args changes +// Before: ut run session TestXXX --coverprofile xxx --junitfile yyy +// After: ut run session TestXXX +// The value of the flag is returned.
+func handleFlags(flag string) string { + var res string + tmp := os.Args[:0] + // Iter to the flag + var i int + for ; i < len(os.Args); i++ { + if os.Args[i] == flag { + i++ + break + } + tmp = append(tmp, os.Args[i]) + } + // Handle the flag + if i < len(os.Args) { + res = os.Args[i] + i++ + } + // Iter the remain flags + for ; i < len(os.Args); i++ { + tmp = append(tmp, os.Args[i]) + } + + // os.Args is now the original flags with '--coverprofile XXX' removed. + os.Args = tmp + return res +} + +func handleFlag(f string) (found bool) { + tmp := os.Args[:0] + for i := 0; i < len(os.Args); i++ { + if os.Args[i] == f { + found = true + continue + } + tmp = append(tmp, os.Args[i]) + } + os.Args = tmp + return +} + +var junitfile string +var coverprofile string +var coverFileTempDir string +var race bool +var short bool + +var except string +var only string + +//nolint:typecheck +func main() { + junitfile = handleFlags("--junitfile") + coverprofile = handleFlags("--coverprofile") + except = handleFlags("--except") + only = handleFlags("--only") + race = handleFlag("--race") + short = handleFlag("--short") + + if coverprofile != "" { + var err error + coverFileTempDir, err = os.MkdirTemp(os.TempDir(), "cov") + if err != nil { + fmt.Println("create temp dir fail", coverFileTempDir) + os.Exit(1) + } + defer os.Remove(coverFileTempDir) + } + + // Get the correct count of CPU if it's in docker. + p = runtime.GOMAXPROCS(0) + // We use 2 * p for `go build` to make it faster. + buildParallel = p * 2 + var err error + workDir, err = os.Getwd() + if err != nil { + fmt.Println("os.Getwd() error", err) + } + + var isSucceed bool + if len(os.Args) == 1 { + // run all tests + isSucceed = cmdRun() + } + + if len(os.Args) >= 2 { + switch os.Args[1] { + case "list": + isSucceed = cmdList(os.Args[2:]...) + case "build": + isSucceed = cmdBuild(os.Args[2:]...) + case "run": + isSucceed = cmdRun(os.Args[2:]...) + default: + isSucceed = usage() + } + } + if !isSucceed { + os.Exit(1) + } +} + +func collectCoverProfileFile() { + // Combine all the cover file of single test function into a whole. 
+	files, err := os.ReadDir(coverFileTempDir)
+	if err != nil {
+		fmt.Println("collect cover file error:", err)
+		os.Exit(-1)
+	}
+
+	w, err := os.Create(coverprofile)
+	if err != nil {
+		fmt.Println("create cover file error:", err)
+		os.Exit(-1)
+	}
+	//nolint: errcheck
+	defer w.Close()
+	w.WriteString("mode: set\n")
+
+	result := make(map[string]*cover.Profile)
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+		collectOneCoverProfileFile(result, file)
+	}
+
+	w1 := bufio.NewWriter(w)
+	for _, prof := range result {
+		for _, block := range prof.Blocks {
+			fmt.Fprintf(w1, "%s:%d.%d,%d.%d %d %d\n",
+				prof.FileName,
+				block.StartLine,
+				block.StartCol,
+				block.EndLine,
+				block.EndCol,
+				block.NumStmt,
+				block.Count,
+			)
+		}
+		if err := w1.Flush(); err != nil {
+			fmt.Println("flush data to cover profile file error:", err)
+			os.Exit(-1)
+		}
+	}
+}
+
+func collectOneCoverProfileFile(result map[string]*cover.Profile, file os.DirEntry) {
+	f, err := os.Open(path.Join(coverFileTempDir, file.Name()))
+	if err != nil {
+		fmt.Println("open temp cover file error:", err)
+		os.Exit(-1)
+	}
+	//nolint: errcheck
+	defer f.Close()
+
+	profs, err := cover.ParseProfilesFromReader(f)
+	if err != nil {
+		fmt.Println("parse cover profile file error:", err)
+		os.Exit(-1)
+	}
+	mergeProfile(result, profs)
+}
+
+func mergeProfile(m map[string]*cover.Profile, profs []*cover.Profile) {
+	for _, prof := range profs {
+		sort.Sort(blocksByStart(prof.Blocks))
+		old, ok := m[prof.FileName]
+		if !ok {
+			m[prof.FileName] = prof
+			continue
+		}
+
+		// Merge samples from the same location.
+		// The data has already been sorted.
+		tmp := old.Blocks[:0]
+		var i, j int
+		for i < len(old.Blocks) && j < len(prof.Blocks) {
+			v1 := old.Blocks[i]
+			v2 := prof.Blocks[j]
+
+			switch compareProfileBlock(v1, v2) {
+			case -1:
+				tmp = appendWithReduce(tmp, v1)
+				i++
+			case 1:
+				tmp = appendWithReduce(tmp, v2)
+				j++
+			default:
+				tmp = appendWithReduce(tmp, v1)
+				tmp = appendWithReduce(tmp, v2)
+				i++
+				j++
+			}
+		}
+		for ; i < len(old.Blocks); i++ {
+			tmp = appendWithReduce(tmp, old.Blocks[i])
+		}
+		for ; j < len(prof.Blocks); j++ {
+			tmp = appendWithReduce(tmp, prof.Blocks[j])
+		}
+
+		m[prof.FileName] = old
+	}
+}
+
+// appendWithReduce works like append(), but it merges duplicated values.
+func appendWithReduce(input []cover.ProfileBlock, b cover.ProfileBlock) []cover.ProfileBlock {
+	if len(input) >= 1 {
+		last := &input[len(input)-1]
+		if b.StartLine == last.StartLine &&
+			b.StartCol == last.StartCol &&
+			b.EndLine == last.EndLine &&
+			b.EndCol == last.EndCol {
+			if b.NumStmt != last.NumStmt {
+				panic(fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt))
+			}
+			// Merge the data with the last element of the slice.
+			last.Count |= b.Count
+			return input
+		}
+	}
+	return append(input, b)
+}
+
+type blocksByStart []cover.ProfileBlock
+
+func compareProfileBlock(x, y cover.ProfileBlock) int {
+	if x.StartLine < y.StartLine {
+		return -1
+	}
+	if x.StartLine > y.StartLine {
+		return 1
+	}
+
+	// Now x.StartLine == y.StartLine
+	if x.StartCol < y.StartCol {
+		return -1
+	}
+	if x.StartCol > y.StartCol {
+		return 1
+	}
+
+	return 0
+}
+
+func (b blocksByStart) Len() int      { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+	bi, bj := b[i], b[j]
+	return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+func listTestCases(pkg string, tasks []task) ([]task, error) {
+	newCases, err := listNewTestCases(pkg)
+	if err != nil {
+		log.Println("list test case error", pkg, err)
+		return nil, withTrace(err)
+	}
+	for _, c := range newCases {
+		tasks = append(tasks, task{pkg, c})
+	}
+
+	return tasks, nil
+}
+
+func filterTestCases(tasks []task, arg1 string) ([]task, error) {
+	if strings.HasPrefix(arg1, "r:") {
+		r, err := regexp.Compile(arg1[2:])
+		if err != nil {
+			return nil, err
+		}
+		tmp := tasks[:0]
+		for _, task := range tasks {
+			if r.MatchString(task.test) {
+				tmp = append(tmp, task)
+			}
+		}
+		return tmp, nil
+	}
+	tmp := tasks[:0]
+	for _, task := range tasks {
+		if strings.Contains(task.test, arg1) {
+			tmp = append(tmp, task)
+		}
+	}
+	return tmp, nil
+}
+
+func listPackages() ([]string, error) {
+	cmd := exec.Command("go", "list", "./...")
+	ss, err := cmdToLines(cmd)
+	if err != nil {
+		return nil, withTrace(err)
+	}
+
+	ret := ss[:0]
+	for _, s := range ss {
+		if !strings.HasPrefix(s, modulePath) {
+			continue
+		}
+		pkg := s[len(modulePath)+1:]
+		if skipDIR(pkg) {
+			continue
+		}
+		ret = append(ret, pkg)
+	}
+	return ret, nil
+}
+
+type numa struct {
+	Fail    bool
+	results []testResult
+}
+
+func (n *numa) worker(wg *sync.WaitGroup, ch chan task) {
+	defer wg.Done()
+	for t := range ch {
+		res := n.runTestCase(t.pkg, t.test)
+		if res.Failure != nil {
+			fmt.Println("[FAIL] ", t.pkg, t.test)
+			fmt.Fprintf(os.Stderr, "err=%s\n%s", res.err.Error(), res.Failure.Contents)
+			n.Fail = true
+		}
+		n.results = append(n.results, res)
+	}
+}
+
+type testResult struct {
+	JUnitTestCase
+	d   time.Duration
+	err error
+}
+
+func (n *numa) runTestCase(pkg string, fn string) testResult {
+	res := testResult{
+		JUnitTestCase: JUnitTestCase{
+			Classname: path.Join(modulePath, pkg),
+			Name:      fn,
+		},
+	}
+
+	var buf bytes.Buffer
+	var err error
+	var start time.Time
+	for i := 0; i < 3; i++ {
+		cmd := n.testCommand(pkg, fn)
+		cmd.Dir = path.Join(workDir, pkg)
+		// Combine the test case output, so the run result for failed cases can be displayed.
+		cmd.Stdout = &buf
+		cmd.Stderr = &buf
+
+		start = time.Now()
+		err = cmd.Run()
+		if err != nil {
+			//lint:ignore S1020
+			if _, ok := err.(*exec.ExitError); ok {
+				// Retry 3 times to get rid of the weird errors below:
+				switch err.Error() {
+				case "signal: segmentation fault (core dumped)":
+					buf.Reset()
+					continue
+				case "signal: trace/breakpoint trap (core dumped)":
+					buf.Reset()
+					continue
+				}
+				if strings.Contains(buf.String(), "panic during panic") {
+					buf.Reset()
+					continue
+				}
+			}
+		}
+		break
+	}
+	if err != nil {
+		res.Failure = &JUnitFailure{
+			Message:  "Failed",
+			Contents: buf.String(),
+		}
+		res.err = err
+	}
+
+	res.d = time.Since(start)
+	res.Time = formatDurationAsSeconds(res.d)
+	return res
+}
+
+func collectTestResults(workers []numa) JUnitTestSuites {
+	version := goVersion()
+	// pkg => test cases
+	pkgs := make(map[string][]JUnitTestCase)
+	durations := make(map[string]time.Duration)
+
+	// The test results in the workers are shuffled, so group them by package here.
+	for _, n := range workers {
+		for _, res := range n.results {
+			cases, ok := pkgs[res.Classname]
+			if !ok {
+				cases = make([]JUnitTestCase, 0, 10)
+			}
+			cases = append(cases, res.JUnitTestCase)
+			pkgs[res.Classname] = cases
+			durations[res.Classname] = durations[res.Classname] + res.d
+		}
+	}
+
+	suites := JUnitTestSuites{}
+	// Turn every package result into a suite.
+	for pkg, cases := range pkgs {
+		suite := JUnitTestSuite{
+			Tests:      len(cases),
+			Failures:   failureCases(cases),
+			Time:       formatDurationAsSeconds(durations[pkg]),
+			Name:       pkg,
+			Properties: packageProperties(version),
+			TestCases:  cases,
+		}
+		suites.Suites = append(suites.Suites, suite)
+	}
+	return suites
+}
+
+func failureCases(input []JUnitTestCase) int {
+	sum := 0
+	for _, v := range input {
+		if v.Failure != nil {
+			sum++
+		}
+	}
+	return sum
+}
+
+func (n *numa) testCommand(pkg string, fn string) *exec.Cmd {
+	args := make([]string, 0, 10)
+	exe := "./" + testFileName(pkg)
+	if coverprofile != "" {
+		fileName := strings.ReplaceAll(pkg, "/", "_") + "." + fn
+		tmpFile := path.Join(coverFileTempDir, fileName)
+		args = append(args, "-test.coverprofile", tmpFile)
+	}
+	args = append(args, "-test.cpu", "1")
+	if !race {
+		args = append(args, []string{"-test.timeout", "5m"}...)
+	} else {
+		// Tests take longer when the race detector is enabled, so use a larger timeout.
+		args = append(args, []string{"-test.timeout", "30m"}...)
+	}
+
+	// session.test -test.run TestClusteredPrefixColumn
+	args = append(args, "-test.run", "^"+fn+"$")
+
+	return exec.Command(exe, args...)
+}
+
+func skipDIR(pkg string) bool {
+	skipDir := []string{"tests", "tools", "bin", "cmd"}
+	for _, ignore := range skipDir {
+		if strings.HasPrefix(pkg, ignore) {
+			return true
+		}
+	}
+	return false
+}
+
+// buildTestBinaryMulti is much faster than building the test packages one by one.
+func buildTestBinaryMulti(pkgs []string) error {
+	// go test --exec=xprog -cover -vet=off --count=0 $(pkgs)
+	xprogPath := path.Join(workDir, "bin/xprog")
+	packages := make([]string, 0, len(pkgs))
+	for _, pkg := range pkgs {
+		packages = append(packages, path.Join(modulePath, pkg))
+	}
+
+	var cmd *exec.Cmd
+	cmd = exec.Command("go", "test", "--exec", xprogPath, "-vet", "off", "--tags=tso_function_test,deadlock", "-count", "0")
+	cmd.Args = append(cmd.Args, packages...)
+	cmd.Dir = workDir
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return withTrace(err)
+	}
+	return nil
+}
+
+func buildTestBinary(pkg string) error {
+	// go test -c
+	cmd := exec.Command("go", "test", "-c", "-vet", "off", "--tags=tso_function_test,deadlock", "-o", testFileName(pkg), "-v")
+	if coverprofile != "" {
+		cmd.Args = append(cmd.Args, "-cover")
+	}
+	if race {
+		cmd.Args = append(cmd.Args, "-race")
+	}
+	if short {
+		cmd.Args = append(cmd.Args, "--test.short")
+	}
+	cmd.Dir = path.Join(workDir, pkg)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return withTrace(err)
+	}
+	return nil
+}
+
+func testBinaryExist(pkg string) (bool, error) {
+	_, err := os.Stat(testFileFullPath(pkg))
+	if err != nil {
+		//lint:ignore S1020
+		if _, ok := err.(*os.PathError); ok {
+			return false, nil
+		}
+	}
+	return true, withTrace(err)
+}
+
+func testFileName(pkg string) string {
+	_, file := path.Split(pkg)
+	return file + ".test.bin"
+}
+
+func testFileFullPath(pkg string) string {
+	return path.Join(workDir, pkg, testFileName(pkg))
+}
+
+func listNewTestCases(pkg string) ([]string, error) {
+	exe := "./" + testFileName(pkg)
+
+	// session.test -test.list Test
+	cmd := exec.Command(exe, "-test.list", "Test")
+	cmd.Dir = path.Join(workDir, pkg)
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	err := cmd.Run()
+	res := strings.Split(buf.String(), "\n")
+	if err != nil && len(res) == 0 {
+		fmt.Println("err ==", err)
+	}
+	return filter(res, func(s string) bool {
+		return strings.HasPrefix(s, "Test") && s != "TestT" && s != "TestBenchDaily"
+	}), nil
+}
+
+func cmdToLines(cmd *exec.Cmd) ([]string, error) {
+	res, err := cmd.Output()
+	if err != nil {
+		return nil, withTrace(err)
+	}
+	ss := bytes.Split(res, []byte{'\n'})
+	ret := make([]string, len(ss))
+	for i, s := range ss {
+		ret[i] = string(s)
+	}
+	return ret, nil
+}
+
+func filter(input []string, f func(string) bool) []string {
+	ret := input[:0]
+	for _, s := range input {
+		if f(s) {
+			ret = append(ret, s)
+		}
+	}
+	return ret
+}
+
+func shuffle(tasks []task) {
+	for i := 0; i < len(tasks); i++ {
+		pos := rand.Intn(len(tasks))
+		tasks[i], tasks[pos] = tasks[pos], tasks[i]
+	}
+}
+
+type errWithStack struct {
+	err error
+	buf []byte
+}
+
+func (e *errWithStack) Error() string {
+	return e.err.Error() + "\n" + string(e.buf)
+}
+
+func withTrace(err error) error {
+	if err == nil {
+		return err
+	}
+	if _, ok := err.(*errWithStack); ok {
+		return err
+	}
+	var stack [4096]byte
+	sz := runtime.Stack(stack[:], false)
+	return &errWithStack{err, stack[:sz]}
+}
+
+func formatDurationAsSeconds(d time.Duration) string {
+	return fmt.Sprintf("%f", d.Seconds())
+}
+
+func packageProperties(goVersion string) []JUnitProperty {
+	return []JUnitProperty{
+		{Name: "go.version", Value: goVersion},
+	}
+}
+
+// goVersion returns the version as reported by the go binary in PATH. This
+// version will not be the same as runtime.Version, which is always the version
+// of go used to build the pd-ut binary.
+//
+// To skip the os/exec call set the GOVERSION environment variable to the
+// desired value.
+func goVersion() string {
+	if version, ok := os.LookupEnv("GOVERSION"); ok {
+		return version
+	}
+	cmd := exec.Command("go", "version")
+	out, err := cmd.Output()
+	if err != nil {
+		return "unknown"
+	}
+	return strings.TrimPrefix(strings.TrimSpace(string(out)), "go version ")
+}
+
+func write(out io.Writer, suites JUnitTestSuites) error {
+	doc, err := xml.MarshalIndent(suites, "", "\t")
+	if err != nil {
+		return err
+	}
+	_, err = out.Write([]byte(xml.Header))
+	if err != nil {
+		return err
+	}
+	_, err = out.Write(doc)
+	return err
+}
+
+// JUnitTestSuites is a collection of JUnit test suites.
+type JUnitTestSuites struct {
+	XMLName xml.Name `xml:"testsuites"`
+	Suites  []JUnitTestSuite
+}
+
+// JUnitTestSuite is a single JUnit test suite which may contain many
+// testcases.
+type JUnitTestSuite struct {
+	XMLName    xml.Name        `xml:"testsuite"`
+	Tests      int             `xml:"tests,attr"`
+	Failures   int             `xml:"failures,attr"`
+	Time       string          `xml:"time,attr"`
+	Name       string          `xml:"name,attr"`
+	Properties []JUnitProperty `xml:"properties>property,omitempty"`
+	TestCases  []JUnitTestCase
+}
+
+// JUnitTestCase is a single test case with its result.
+type JUnitTestCase struct {
+	XMLName     xml.Name          `xml:"testcase"`
+	Classname   string            `xml:"classname,attr"`
+	Name        string            `xml:"name,attr"`
+	Time        string            `xml:"time,attr"`
+	SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
+	Failure     *JUnitFailure     `xml:"failure,omitempty"`
+}
+
+// JUnitSkipMessage contains the reason why a testcase was skipped.
+type JUnitSkipMessage struct {
+	Message string `xml:"message,attr"`
+}
+
+// JUnitProperty represents a key/value pair used to define properties.
+type JUnitProperty struct {
+	Name  string `xml:"name,attr"`
+	Value string `xml:"value,attr"`
+}
+
+// JUnitFailure contains data related to a failed test.
+type JUnitFailure struct {
+	Message  string `xml:"message,attr"`
+	Type     string `xml:"type,attr"`
+	Contents string `xml:",chardata"`
+}
diff --git a/tools/pd-xprog/main.go b/tools/pd-xprog/main.go
new file mode 100644
index 000000000000..d74f7a36ae73
--- /dev/null
+++ b/tools/pd-xprog/main.go
@@ -0,0 +1,120 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+func main() {
+	// See https://github.com/golang/go/issues/15513#issuecomment-773994959
+	// go test --exec=xprog ./...
+	// The command line args look like:
+	// '$CWD/xprog /tmp/go-build2662369829/b1382/aggfuncs.test -test.paniconexit0 -test.timeout=10m0s'
+	// This program moves the test binary /tmp/go-build2662369829/b1382/aggfuncs.test to someplace else for later use.
+
+	// Extract the current work directory
+	cwd := os.Args[0]
+	println(cwd)
+	cwd = cwd[:len(cwd)-len("bin/xprog")]
+
+	testBinaryPath := os.Args[1]
+	dir, _ := filepath.Split(testBinaryPath)
+
+	// Extract the package info from /tmp/go-build2662369829/b1382/importcfg.link
+	pkg := getPackageInfo(dir)
+
+	const prefix = "github.com/tikv/pd"
+	if !strings.HasPrefix(pkg, prefix) {
+		os.Exit(-3)
+	}
+
+	// github.com/tikv/pd/pkg/cache.test => pkg/cache
+	pkg = pkg[len(prefix) : len(pkg)-len(".test")]
+
+	_, file := filepath.Split(pkg)
+
+	// The path of the destination file looks like $CWD/pkg/cache/cache.test.bin
+	newName := filepath.Join(cwd, pkg, file+".test.bin")
+	fmt.Print("check!:", newName)
+
+	if err1 := os.Rename(testBinaryPath, newName); err1 != nil {
+		// Rename failed, handle errors like "invalid cross-device link".
+		err1 = MoveFile(testBinaryPath, newName)
+		if err1 != nil {
+			os.Exit(-4)
+		}
+	}
+}
+
+func getPackageInfo(dir string) string {
+	// Read the /tmp/go-build2662369829/b1382/importcfg.link file to get the package information
+	f, err := os.Open(filepath.Join(dir, "importcfg.link"))
+	if err != nil {
+		os.Exit(-1)
+	}
+	defer f.Close()
+
+	r := bufio.NewReader(f)
+	// packagefile github.com/pingcap/tidb/session.test=/home/genius/.cache/go-build/fb/fb1587cce5727fa9461131eab8260a52878da04f5c8da49dd3c7b2d941430c63-d
+	line, _, err := r.ReadLine()
+	if err != nil {
+		os.Exit(-2)
+	}
+	start := strings.IndexByte(string(line), ' ')
+	end := strings.IndexByte(string(line), '=')
+	pkg := string(line[start+1 : end])
+	return pkg
+}
+
+func MoveFile(sourcePath, destPath string) error {
+	inputFile, err := os.Open(sourcePath)
+	if err != nil {
+		return fmt.Errorf("Couldn't open source file: %s", err)
+	}
+	outputFile, err := os.Create(destPath)
+	if err != nil {
+		inputFile.Close()
+		return fmt.Errorf("Couldn't open dest file: %s", err)
+	}
+	defer outputFile.Close()
+	_, err = io.Copy(outputFile, inputFile)
+	inputFile.Close()
+	if err != nil {
+		return fmt.Errorf("Writing to output file failed: %s", err)
+	}
+
+	// Handle the permissions
+	si, err := os.Stat(sourcePath)
+	if err != nil {
+		return fmt.Errorf("Stat error: %s", err)
+	}
+	err = os.Chmod(destPath, si.Mode())
+	if err != nil {
+		return fmt.Errorf("Chmod error: %s", err)
+	}
+
+	// The copy was successful, so now delete the original file
+	err = os.Remove(sourcePath)
+	if err != nil {
+		return fmt.Errorf("Failed removing original file: %s", err)
+	}
+	return nil
+}
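Editor's note: the following is a standalone sketch, not part of the patch above. It illustrates the set-mode merge rule that the patch's appendWithReduce applies when combining the per-test-function coverage files: blocks covering the same source range are collapsed into one, and their hit counts are OR-ed (profiles are written with "mode: set"). The name appendReduced and the sample block values are made up for illustration; it uses golang.org/x/tools/cover, the same package the patch imports.

package main

import (
	"fmt"

	"golang.org/x/tools/cover"
)

// appendReduced mirrors the merge rule: if the incoming block covers exactly
// the same source range as the previous block, OR the counts together instead
// of appending a duplicate entry.
func appendReduced(blocks []cover.ProfileBlock, b cover.ProfileBlock) []cover.ProfileBlock {
	if n := len(blocks); n > 0 {
		last := &blocks[n-1]
		if last.StartLine == b.StartLine && last.StartCol == b.StartCol &&
			last.EndLine == b.EndLine && last.EndCol == b.EndCol {
			last.Count |= b.Count // set mode: "covered by any test" wins
			return blocks
		}
	}
	return append(blocks, b)
}

func main() {
	// The same block reported by two different test functions: one missed it, one hit it.
	merged := appendReduced(nil, cover.ProfileBlock{StartLine: 10, StartCol: 2, EndLine: 12, EndCol: 3, NumStmt: 2, Count: 0})
	merged = appendReduced(merged, cover.ProfileBlock{StartLine: 10, StartCol: 2, EndLine: 12, EndCol: 3, NumStmt: 2, Count: 1})
	fmt.Println(merged) // a single block with Count == 1
}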