diff --git a/.bazelrc b/.bazelrc index e1e11044ae533..855bc5770e169 100644 --- a/.bazelrc +++ b/.bazelrc @@ -5,9 +5,9 @@ run --color=yes build:release --workspace_status_command=./build/print-workspace-status.sh --stamp build:release --config=ci build --incompatible_strict_action_env --incompatible_enable_cc_toolchain_resolution -build:ci --remote_cache=http://172.16.4.3:8084/tidb +build:ci --remote_cache=http://172.16.4.21:8080/tidb --remote_timeout="10s" test:ci --verbose_failures test:ci --test_timeout=180 test:ci --test_env=GO_TEST_WRAP_TESTV=1 --test_verbose_timeout_warnings -test:ci --remote_cache=http://172.16.4.3:8084/tidb +test:ci --remote_cache=http://172.16.4.21:8080/tidb --remote_timeout="10s" test:ci --test_env=TZ=Asia/Shanghai --test_output=errors --experimental_ui_max_stdouterr_bytes=104857600 diff --git a/DEPS.bzl b/DEPS.bzl index 65f9d5b4b657c..909a95484d13c 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -1,6 +1,35 @@ load("@bazel_gazelle//:deps.bzl", "go_repository") def go_deps(): + go_repository( + name = "cc_mvdan_gofumpt", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/gofumpt", + sum = "h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=", + version = "v0.3.1", + ) + go_repository( + name = "cc_mvdan_interfacer", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/interfacer", + sum = "h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=", + version = "v0.0.0-20180901003855-c20040233aed", + ) + go_repository( + name = "cc_mvdan_lint", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/lint", + sum = "h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=", + version = "v0.0.0-20170908181259-adc824a0674b", + ) + go_repository( + name = "cc_mvdan_unparam", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/unparam", + sum = "h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=", + version = "v0.0.0-20211214103731-d0ef000c54e5", + ) + go_repository( name = "co_honnef_go_tools", build_file_proto_mode = "disable_global", @@ -9,6 +38,14 @@ def go_deps(): sum = "h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=", version = "v0.3.2", ) + go_repository( + name = "com_4d63_gochecknoglobals", + build_file_proto_mode = "disable", + importpath = "4d63.com/gochecknoglobals", + sum = "h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=", + version = "v0.1.0", + ) + go_repository( name = "com_github_ajg_form", build_file_proto_mode = "disable_global", @@ -37,6 +74,14 @@ def go_deps(): sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=", version = "v0.0.0-20190924025748-f65c72e2690d", ) + go_repository( + name = "com_github_alexkohler_prealloc", + build_file_proto_mode = "disable", + importpath = "github.com/alexkohler/prealloc", + sum = "h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=", + version = "v1.0.0", + ) + go_repository( name = "com_github_aliyun_alibaba_cloud_sdk_go", build_file_proto_mode = "disable", @@ -59,6 +104,21 @@ def go_deps(): sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", version = "v1.0.0", ) + go_repository( + name = "com_github_antonboom_errname", + build_file_proto_mode = "disable", + importpath = "github.com/Antonboom/errname", + sum = "h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=", + version = "v0.1.6", + ) + go_repository( + name = "com_github_antonboom_nilnil", + build_file_proto_mode = "disable", + importpath = "github.com/Antonboom/nilnil", + sum = "h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=", + version = "v0.1.1", + ) + go_repository( name = "com_github_apache_thrift", build_file_proto_mode = "disable_global", @@ 
-94,6 +154,21 @@ def go_deps(): sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=", version = "v0.0.0-20180808171621-7fddfc383310", ) + go_repository( + name = "com_github_ashanbrown_forbidigo", + build_file_proto_mode = "disable", + importpath = "github.com/ashanbrown/forbidigo", + sum = "h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_ashanbrown_makezero", + build_file_proto_mode = "disable", + importpath = "github.com/ashanbrown/makezero", + sum = "h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=", + version = "v1.1.1", + ) + go_repository( name = "com_github_aws_aws_sdk_go", build_file_proto_mode = "disable_global", @@ -164,6 +239,14 @@ def go_deps(): sum = "h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=", version = "v0.0.3-0.20200106085610-5cbc8cc4026c", ) + go_repository( + name = "com_github_bkielbasa_cyclop", + build_file_proto_mode = "disable", + importpath = "github.com/bkielbasa/cyclop", + sum = "h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=", + version = "v1.2.0", + ) + go_repository( name = "com_github_blacktear23_go_proxyprotocol", build_file_proto_mode = "disable_global", @@ -171,12 +254,41 @@ def go_deps(): sum = "h1:WmMmtZanGEfIHnJN9N3A4Pl6mM69D+GxEph2eOaCf7g=", version = "v1.0.0", ) + go_repository( + name = "com_github_blizzy78_varnamelen", + build_file_proto_mode = "disable", + importpath = "github.com/blizzy78/varnamelen", + sum = "h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=", + version = "v0.8.0", + ) + go_repository( + name = "com_github_bombsimon_wsl_v3", + build_file_proto_mode = "disable", + importpath = "github.com/bombsimon/wsl/v3", + sum = "h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=", + version = "v3.3.0", + ) + go_repository( + name = "com_github_breml_bidichk", + build_file_proto_mode = "disable", + importpath = "github.com/breml/bidichk", + sum = "h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_breml_errchkjson", + build_file_proto_mode = "disable", + importpath = "github.com/breml/errchkjson", + sum = "h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw=", + version = "v0.3.0", + ) + go_repository( name = "com_github_burntsushi_toml", build_file_proto_mode = "disable_global", importpath = "github.com/BurntSushi/toml", - sum = "h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=", - version = "v0.4.1", + sum = "h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=", + version = "v1.1.0", ) go_repository( name = "com_github_burntsushi_xgb", @@ -185,6 +297,14 @@ def go_deps(): sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", version = "v0.0.0-20160522181843-27f122750802", ) + go_repository( + name = "com_github_butuzov_ireturn", + build_file_proto_mode = "disable", + importpath = "github.com/butuzov/ireturn", + sum = "h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=", + version = "v0.1.1", + ) + go_repository( name = "com_github_carlmjohnson_flagext", build_file_proto_mode = "disable_global", @@ -227,6 +347,13 @@ def go_deps(): sum = "h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=", version = "v0.0.9", ) + go_repository( + name = "com_github_chavacava_garif", + build_file_proto_mode = "disable", + importpath = "github.com/chavacava/garif", + sum = "h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M=", + version = "v0.0.0-20220316182200-5cad0b5181d4", + ) go_repository( name = "com_github_cheggaaa_pb_v3", @@ -473,6 +600,14 @@ def go_deps(): sum = "h1:0rkFMAbn5KBKNpJyHQ6Prb95vIKanmAe62KxsrN+sqA=", version = 
"v0.0.0-20171016134553-529a34b1c186", ) + go_repository( + name = "com_github_daixiang0_gci", + build_file_proto_mode = "disable", + importpath = "github.com/daixiang0/gci", + sum = "h1:+EZ83znNs73C9ZBTM7xhNagMP6gJs5wlptiFiuce5BM=", + version = "v0.3.4", + ) + go_repository( name = "com_github_danjacques_gofslock", build_file_proto_mode = "disable_global", @@ -501,6 +636,14 @@ def go_deps(): sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", version = "v1.1.1", ) + go_repository( + name = "com_github_denis_tingaikin_go_header", + build_file_proto_mode = "disable", + importpath = "github.com/denis-tingaikin/go-header", + sum = "h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=", + version = "v0.4.3", + ) + go_repository( name = "com_github_dgraph_io_badger", build_file_proto_mode = "disable_global", @@ -537,6 +680,14 @@ def go_deps(): sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=", version = "v0.0.0-20181026042036-e10d5fee7954", ) + go_repository( + name = "com_github_djarvur_go_err113", + build_file_proto_mode = "disable", + importpath = "github.com/Djarvur/go-err113", + sum = "h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=", + version = "v0.0.0-20210108212216-aea10b59be24", + ) + go_repository( name = "com_github_dnaeon_go_vcr", build_file_proto_mode = "disable_global", @@ -600,6 +751,14 @@ def go_deps(): sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", version = "v0.1.0", ) + go_repository( + name = "com_github_esimonov_ifshort", + build_file_proto_mode = "disable", + importpath = "github.com/esimonov/ifshort", + sum = "h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=", + version = "v1.0.4", + ) + go_repository( name = "com_github_etcd_io_bbolt", build_file_proto_mode = "disable_global", @@ -614,6 +773,14 @@ def go_deps(): sum = "h1:Y2I0lxOttdUKz+hNaIdG3FtjuQrTmwXun1opRV65IZc=", version = "v0.0.0-20190801230047-ad7f989257ca", ) + go_repository( + name = "com_github_ettle_strcase", + build_file_proto_mode = "disable", + importpath = "github.com/ettle/strcase", + sum = "h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=", + version = "v0.1.1", + ) + go_repository( name = "com_github_fasthttp_contrib_websocket", build_file_proto_mode = "disable_global", @@ -635,6 +802,14 @@ def go_deps(): sum = "h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=", version = "v1.1.0", ) + go_repository( + name = "com_github_fatih_structtag", + build_file_proto_mode = "disable", + importpath = "github.com/fatih/structtag", + sum = "h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=", + version = "v1.2.0", + ) + go_repository( name = "com_github_felixge_httpsnoop", build_file_proto_mode = "disable_global", @@ -642,6 +817,14 @@ def go_deps(): sum = "h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=", version = "v1.0.1", ) + go_repository( + name = "com_github_firefart_nonamedreturns", + build_file_proto_mode = "disable", + importpath = "github.com/firefart/nonamedreturns", + sum = "h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=", + version = "v1.0.1", + ) + go_repository( name = "com_github_flosch_pongo2", build_file_proto_mode = "disable_global", @@ -681,8 +864,8 @@ def go_deps(): name = "com_github_fsnotify_fsnotify", build_file_proto_mode = "disable_global", importpath = "github.com/fsnotify/fsnotify", - sum = "h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=", - version = "v1.5.1", + sum = "h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=", + version = "v1.5.4", ) go_repository( name = "com_github_fsouza_fake_gcs_server", @@ -695,9 +878,17 @@ def go_deps(): name = "com_github_fzipp_gocyclo", 
build_file_proto_mode = "disable_global", importpath = "github.com/fzipp/gocyclo", - sum = "h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc=", - version = "v0.3.1", + sum = "h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw=", + version = "v0.5.1", + ) + go_repository( + name = "com_github_gaijinentertainment_go_exhaustruct_v2", + build_file_proto_mode = "disable", + importpath = "github.com/GaijinEntertainment/go-exhaustruct/v2", + sum = "h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M=", + version = "v2.1.0", ) + go_repository( name = "com_github_gavv_httpexpect", build_file_proto_mode = "disable_global", @@ -747,6 +938,14 @@ def go_deps(): sum = "h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=", version = "v0.0.0-20180628173108-788fd7840127", ) + go_repository( + name = "com_github_go_critic_go_critic", + build_file_proto_mode = "disable", + importpath = "github.com/go-critic/go-critic", + sum = "h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw=", + version = "v0.6.3", + ) + go_repository( name = "com_github_go_errors_errors", build_file_proto_mode = "disable_global", @@ -824,6 +1023,70 @@ def go_deps(): sum = "h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=", version = "v0.0.0-20210107165309-348f09dbbbc0", ) + go_repository( + name = "com_github_go_toolsmith_astcast", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astcast", + sum = "h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astcopy", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astcopy", + sum = "h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astequal", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astequal", + sum = "h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_go_toolsmith_astfmt", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astfmt", + sum = "h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astp", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astp", + sum = "h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_strparse", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/strparse", + sum = "h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_typep", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/typep", + sum = "h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_go_xmlfmt_xmlfmt", + build_file_proto_mode = "disable", + importpath = "github.com/go-xmlfmt/xmlfmt", + sum = "h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=", + version = "v0.0.0-20191208150333-d5b6f63a941b", + ) + go_repository( + name = "com_github_gobwas_glob", + build_file_proto_mode = "disable", + importpath = "github.com/gobwas/glob", + sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=", + version = "v0.2.3", + ) + go_repository( name = "com_github_gobwas_httphead", build_file_proto_mode = "disable_global", @@ -852,6 +1115,14 @@ def go_deps(): sum = "h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=", version = "v5.0.4", ) + go_repository( + name = 
"com_github_gofrs_flock", + build_file_proto_mode = "disable", + importpath = "github.com/gofrs/flock", + sum = "h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=", + version = "v0.8.1", + ) + go_repository( name = "com_github_gogo_googleapis", build_file_proto_mode = "disable_global", @@ -927,6 +1198,28 @@ def go_deps(): sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=", version = "v0.0.4", ) + go_repository( + name = "com_github_golangci_check", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/check", + sum = "h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=", + version = "v0.0.0-20180506172741-cfe4005ccda2", + ) + go_repository( + name = "com_github_golangci_dupl", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/dupl", + sum = "h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=", + version = "v0.0.0-20180902072040-3e9179ac440a", + ) + go_repository( + name = "com_github_golangci_go_misc", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/go-misc", + sum = "h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=", + version = "v0.0.0-20220329215616-d24fe342adfe", + ) + go_repository( name = "com_github_golangci_gofmt", build_file_proto_mode = "disable", @@ -934,6 +1227,42 @@ def go_deps(): sum = "h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=", version = "v0.0.0-20190930125516-244bba706f1a", ) + go_repository( + name = "com_github_golangci_golangci_lint", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/golangci-lint", + sum = "h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo=", + version = "v1.46.2", + ) + go_repository( + name = "com_github_golangci_gosec", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/gosec", + sum = "h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc=", + version = "v0.0.0-20180901114220-8afd9cbb6cfb", + ) + go_repository( + name = "com_github_golangci_lint_1", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/lint-1", + sum = "h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=", + version = "v0.0.0-20191013205115-297bf364a8e0", + ) + go_repository( + name = "com_github_golangci_maligned", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/maligned", + sum = "h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=", + version = "v0.0.0-20180506175553-b1d89398deca", + ) + + go_repository( + name = "com_github_golangci_misspell", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/misspell", + sum = "h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=", + version = "v0.3.5", + ) go_repository( name = "com_github_golangci_prealloc", @@ -942,6 +1271,21 @@ def go_deps(): sum = "h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=", version = "v0.0.0-20180630174525-215b22d4de21", ) + go_repository( + name = "com_github_golangci_revgrep", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/revgrep", + sum = "h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=", + version = "v0.0.0-20210930125155-c22e5001d4f2", + ) + + go_repository( + name = "com_github_golangci_unconvert", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/unconvert", + sum = "h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=", + version = "v0.0.0-20180507085042-28b1c447d1f4", + ) go_repository( name = "com_github_gomodule_redigo", @@ -1070,6 +1414,35 @@ def go_deps(): sum = "h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=", version = "v1.4.2", ) + go_repository( + name = "com_github_gostaticanalysis_analysisutil", + 
build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/analysisutil", + sum = "h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=", + version = "v0.7.1", + ) + go_repository( + name = "com_github_gostaticanalysis_comment", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/comment", + sum = "h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=", + version = "v1.4.2", + ) + go_repository( + name = "com_github_gostaticanalysis_forcetypeassert", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/forcetypeassert", + sum = "h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_gostaticanalysis_nilerr", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/nilerr", + sum = "h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=", + version = "v0.1.1", + ) + go_repository( name = "com_github_grpc_ecosystem_go_grpc_middleware", build_file_proto_mode = "disable_global", @@ -1142,8 +1515,8 @@ def go_deps(): name = "com_github_hashicorp_go_multierror", build_file_proto_mode = "disable_global", importpath = "github.com/hashicorp/go-multierror", - sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=", - version = "v1.0.0", + sum = "h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=", + version = "v1.1.1", ) go_repository( name = "com_github_hashicorp_go_net", @@ -1184,8 +1557,8 @@ def go_deps(): name = "com_github_hashicorp_go_version", build_file_proto_mode = "disable_global", importpath = "github.com/hashicorp/go-version", - sum = "h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=", - version = "v1.2.0", + sum = "h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=", + version = "v1.4.0", ) go_repository( name = "com_github_hashicorp_golang_lru", @@ -1236,6 +1609,14 @@ def go_deps(): sum = "h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=", version = "v1.1.2", ) + go_repository( + name = "com_github_hexops_gotextdiff", + build_file_proto_mode = "disable", + importpath = "github.com/hexops/gotextdiff", + sum = "h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=", + version = "v1.0.3", + ) + go_repository( name = "com_github_hpcloud_tail", build_file_proto_mode = "disable_global", @@ -1362,6 +1743,28 @@ def go_deps(): sum = "h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk=", version = "v2.5.1", ) + go_repository( + name = "com_github_jgautheron_goconst", + build_file_proto_mode = "disable", + importpath = "github.com/jgautheron/goconst", + sum = "h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_jingyugao_rowserrcheck", + build_file_proto_mode = "disable", + importpath = "github.com/jingyugao/rowserrcheck", + sum = "h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_jirfag_go_printf_func_name", + build_file_proto_mode = "disable", + importpath = "github.com/jirfag/go-printf-func-name", + sum = "h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=", + version = "v0.0.0-20200119135958-7558a9eaa5af", + ) + go_repository( name = "com_github_jmespath_go_jmespath", build_file_proto_mode = "disable_global", @@ -1460,6 +1863,14 @@ def go_deps(): sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", version = "v1.3.0", ) + go_repository( + name = "com_github_julz_importas", + build_file_proto_mode = "disable", + importpath = "github.com/julz/importas", + sum = "h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=", + version = "v0.1.0", + ) + 
go_repository( name = "com_github_jung_kurt_gofpdf", build_file_proto_mode = "disable_global", @@ -1506,8 +1917,12 @@ def go_deps(): name = "com_github_kisielk_errcheck", build_file_proto_mode = "disable_global", importpath = "github.com/kisielk/errcheck", - sum = "h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=", - version = "v1.5.0", + patch_args = ["-p1"], + patches = [ + "//build/patches:com_github_kisielk_errcheck.patch", + ], + sum = "h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s=", + version = "v1.6.1", ) go_repository( name = "com_github_kisielk_gotool", @@ -1565,6 +1980,21 @@ def go_deps(): sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", version = "v0.2.0", ) + go_repository( + name = "com_github_kulti_thelper", + build_file_proto_mode = "disable", + importpath = "github.com/kulti/thelper", + sum = "h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs=", + version = "v0.6.2", + ) + go_repository( + name = "com_github_kunwardeep_paralleltest", + build_file_proto_mode = "disable", + importpath = "github.com/kunwardeep/paralleltest", + sum = "h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=", + version = "v1.0.3", + ) + go_repository( name = "com_github_kyoh86_exportloopref", build_file_proto_mode = "disable", @@ -1587,6 +2017,35 @@ def go_deps(): sum = "h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=", version = "v0.3.0", ) + go_repository( + name = "com_github_ldez_gomoddirectives", + build_file_proto_mode = "disable", + importpath = "github.com/ldez/gomoddirectives", + sum = "h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_ldez_tagliatelle", + build_file_proto_mode = "disable", + importpath = "github.com/ldez/tagliatelle", + sum = "h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_leonklingele_grouper", + build_file_proto_mode = "disable", + importpath = "github.com/leonklingele/grouper", + sum = "h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_lufeee_execinquery", + build_file_proto_mode = "disable", + importpath = "github.com/lufeee/execinquery", + sum = "h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=", + version = "v1.2.1", + ) + go_repository( name = "com_github_lufia_plan9stats", build_file_proto_mode = "disable_global", @@ -1598,9 +2057,31 @@ def go_deps(): name = "com_github_magiconair_properties", build_file_proto_mode = "disable_global", importpath = "github.com/magiconair/properties", - sum = "h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=", - version = "v1.8.1", + sum = "h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=", + version = "v1.8.6", + ) + go_repository( + name = "com_github_maratori_testpackage", + build_file_proto_mode = "disable", + importpath = "github.com/maratori/testpackage", + sum = "h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_masterminds_semver", + build_file_proto_mode = "disable", + importpath = "github.com/Masterminds/semver", + sum = "h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_matoous_godox", + build_file_proto_mode = "disable", + importpath = "github.com/matoous/godox", + sum = "h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=", + version = "v0.0.0-20210227103229-6504466cf951", ) + go_repository( name = "com_github_mattn_go_colorable", build_file_proto_mode = "disable_global", @@ -1636,6 +2117,14 @@ def go_deps(): 
sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", version = "v1.0.1", ) + go_repository( + name = "com_github_mbilski_exhaustivestruct", + build_file_proto_mode = "disable", + importpath = "github.com/mbilski/exhaustivestruct", + sum = "h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=", + version = "v1.2.0", + ) + go_repository( name = "com_github_mediocregopher_mediocre_go_lib", build_file_proto_mode = "disable_global", @@ -1650,6 +2139,14 @@ def go_deps(): sum = "h1:oacPXPKHJg0hcngVVrdtTnfGJiS+PtwoQwTBZGFlV4k=", version = "v3.3.0", ) + go_repository( + name = "com_github_mgechev_revive", + build_file_proto_mode = "disable", + importpath = "github.com/mgechev/revive", + sum = "h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4=", + version = "v1.2.1", + ) + go_repository( name = "com_github_microcosm_cc_bluemonday", build_file_proto_mode = "disable_global", @@ -1678,6 +2175,14 @@ def go_deps(): sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=", version = "v1.1.0", ) + go_repository( + name = "com_github_mitchellh_go_ps", + build_file_proto_mode = "disable", + importpath = "github.com/mitchellh/go-ps", + sum = "h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=", + version = "v1.0.0", + ) + go_repository( name = "com_github_mitchellh_go_testing_interface", build_file_proto_mode = "disable_global", @@ -1703,8 +2208,8 @@ def go_deps(): name = "com_github_mitchellh_mapstructure", build_file_proto_mode = "disable_global", importpath = "github.com/mitchellh/mapstructure", - sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=", - version = "v1.1.2", + sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=", + version = "v1.5.0", ) go_repository( name = "com_github_modern_go_concurrent", @@ -1727,6 +2232,14 @@ def go_deps(): sum = "h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA=", version = "v0.0.0-20171022184752-b58185e213c5", ) + go_repository( + name = "com_github_moricho_tparallel", + build_file_proto_mode = "disable", + importpath = "github.com/moricho/tparallel", + sum = "h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=", + version = "v0.2.1", + ) + go_repository( name = "com_github_moul_http2curl", build_file_proto_mode = "disable_global", @@ -1741,6 +2254,14 @@ def go_deps(): sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", version = "v0.0.0-20190716064945-2f068394615f", ) + go_repository( + name = "com_github_nakabonne_nestif", + build_file_proto_mode = "disable", + importpath = "github.com/nakabonne/nestif", + sum = "h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=", + version = "v0.3.1", + ) + go_repository( name = "com_github_nats_io_nats_go", build_file_proto_mode = "disable_global", @@ -1762,6 +2283,14 @@ def go_deps(): sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=", version = "v1.0.1", ) + go_repository( + name = "com_github_nbutton23_zxcvbn_go", + build_file_proto_mode = "disable", + importpath = "github.com/nbutton23/zxcvbn-go", + sum = "h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=", + version = "v0.0.0-20210217022336-fa2cb2858354", + ) + go_repository( name = "com_github_ncw_directio", build_file_proto_mode = "disable_global", @@ -1790,6 +2319,22 @@ def go_deps(): sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=", version = "v0.0.0-20200227124842-a10e7caefd8e", ) + go_repository( + name = "com_github_nishanths_exhaustive", + build_file_proto_mode = "disable", + importpath = "github.com/nishanths/exhaustive", + sum = "h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=", + version = "v0.7.11", + ) + + go_repository( + name = 
"com_github_nishanths_predeclared", + build_file_proto_mode = "disable", + importpath = "github.com/nishanths/predeclared", + sum = "h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=", + version = "v0.2.2", + ) + go_repository( name = "com_github_nxadm_tail", build_file_proto_mode = "disable_global", @@ -1839,6 +2384,14 @@ def go_deps(): sum = "h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=", version = "v1.18.1", ) + go_repository( + name = "com_github_openpeedeep_depguard", + build_file_proto_mode = "disable", + importpath = "github.com/OpenPeeDeeP/depguard", + sum = "h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o=", + version = "v1.1.0", + ) + go_repository( name = "com_github_opentracing_basictracer_go", build_file_proto_mode = "disable_global", @@ -1871,9 +2424,24 @@ def go_deps(): name = "com_github_pelletier_go_toml", build_file_proto_mode = "disable_global", importpath = "github.com/pelletier/go-toml", - sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=", - version = "v1.2.0", + sum = "h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=", + version = "v1.9.5", + ) + go_repository( + name = "com_github_pelletier_go_toml_v2", + build_file_proto_mode = "disable", + importpath = "github.com/pelletier/go-toml/v2", + sum = "h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo=", + version = "v2.0.0", ) + go_repository( + name = "com_github_phayes_checkstyle", + build_file_proto_mode = "disable", + importpath = "github.com/phayes/checkstyle", + sum = "h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=", + version = "v0.0.0-20170904204023-bfd46e6a821d", + ) + go_repository( name = "com_github_phayes_freeport", build_file_proto_mode = "disable_global", @@ -1934,8 +2502,8 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sum = "h1:dsMpneacHyuVslSVndgUfJKrXFNG7VPdXip2ulG6glo=", - version = "v0.0.0-20220517085838-12e2f5a9d167", + sum = "h1:nP2wmyw9JTRsk5rm+tZtfAso6c/1FvuaFNbXTaYz3FE=", + version = "v0.0.0-20220705053936-aa9c2d20cd2a", ) go_repository( name = "com_github_pingcap_log", @@ -1955,8 +2523,8 @@ def go_deps(): name = "com_github_pingcap_tipb", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/tipb", - sum = "h1:L4nZwfYSrIsWPAZR8zMwHaNQJy0Rjy3Od6Smj5mlOms=", - version = "v0.0.0-20220602075447-4847c5d68e73", + sum = "h1:XaTE4ZhQbQtQZtAVzlZh/Pf6SjFfMSTe1ia2nGcl36Y=", + version = "v0.0.0-20220706024432-7be3cc83a7d5", ) go_repository( name = "com_github_pkg_browser", @@ -1986,6 +2554,14 @@ def go_deps(): sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", version = "v1.0.0", ) + go_repository( + name = "com_github_polyfloyd_go_errorlint", + build_file_proto_mode = "disable", + importpath = "github.com/polyfloyd/go-errorlint", + sum = "h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM=", + version = "v1.0.0", + ) + go_repository( name = "com_github_posener_complete", build_file_proto_mode = "disable_global", @@ -2035,6 +2611,42 @@ def go_deps(): sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=", version = "v0.7.1", ) + go_repository( + name = "com_github_quasilyte_go_ruleguard", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/go-ruleguard", + sum = "h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA=", + version = "v0.3.16-0.20220213074421-6aa060fab41a", + ) + go_repository( + name = "com_github_quasilyte_go_ruleguard_dsl", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/go-ruleguard/dsl", + sum = 
"h1:5+KTKb2YREUYiqZFEIuifFyBxlcCUPWgNZkWy71XS0Q=", + version = "v0.3.19", + ) + go_repository( + name = "com_github_quasilyte_gogrep", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/gogrep", + sum = "h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=", + version = "v0.0.0-20220120141003-628d8b3623b5", + ) + go_repository( + name = "com_github_quasilyte_regex_syntax", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/regex/syntax", + sum = "h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=", + version = "v0.0.0-20200407221936-30656e2c4a95", + ) + go_repository( + name = "com_github_quasilyte_stdinfo", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/stdinfo", + sum = "h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=", + version = "v0.0.0-20220114132959-f7386bf02567", + ) + go_repository( name = "com_github_rcrowley_go_metrics", build_file_proto_mode = "disable_global", @@ -2084,6 +2696,21 @@ def go_deps(): sum = "h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=", version = "v2.1.0", ) + go_repository( + name = "com_github_ryancurrah_gomodguard", + build_file_proto_mode = "disable", + importpath = "github.com/ryancurrah/gomodguard", + sum = "h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=", + version = "v1.2.3", + ) + go_repository( + name = "com_github_ryanrolds_sqlclosecheck", + build_file_proto_mode = "disable", + importpath = "github.com/ryanrolds/sqlclosecheck", + sum = "h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=", + version = "v0.3.0", + ) + go_repository( name = "com_github_ryanuber_columnize", build_file_proto_mode = "disable_global", @@ -2091,6 +2718,14 @@ def go_deps(): sum = "h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=", version = "v2.1.0+incompatible", ) + go_repository( + name = "com_github_sanposhiho_wastedassign_v2", + build_file_proto_mode = "disable", + importpath = "github.com/sanposhiho/wastedassign/v2", + sum = "h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=", + version = "v2.0.6", + ) + go_repository( name = "com_github_sclevine_agouti", build_file_proto_mode = "disable_global", @@ -2105,6 +2740,14 @@ def go_deps(): sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=", version = "v0.0.0-20170313163322-e2103e2c3529", ) + go_repository( + name = "com_github_securego_gosec_v2", + build_file_proto_mode = "disable", + importpath = "github.com/securego/gosec/v2", + sum = "h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=", + version = "v2.11.0", + ) + go_repository( name = "com_github_sergi_go_diff", build_file_proto_mode = "disable_global", @@ -2112,12 +2755,20 @@ def go_deps(): sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", version = "v1.1.0", ) + go_repository( + name = "com_github_shazow_go_diff", + build_file_proto_mode = "disable", + importpath = "github.com/shazow/go-diff", + sum = "h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=", + version = "v0.0.0-20160112020656-b6b7b6733b8c", + ) + go_repository( name = "com_github_shirou_gopsutil_v3", build_file_proto_mode = "disable_global", importpath = "github.com/shirou/gopsutil/v3", - sum = "h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA=", - version = "v3.21.12", + sum = "h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=", + version = "v3.22.4", ) go_repository( name = "com_github_shopify_goreferrer", @@ -2175,6 +2826,21 @@ def go_deps(): sum = "h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=", version = "v1.8.1", ) + go_repository( + name = "com_github_sivchari_containedctx", + build_file_proto_mode = "disable", + importpath = 
"github.com/sivchari/containedctx", + sum = "h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_sivchari_tenv", + build_file_proto_mode = "disable", + importpath = "github.com/sivchari/tenv", + sum = "h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=", + version = "v1.5.0", + ) + go_repository( name = "com_github_smartystreets_assertions", build_file_proto_mode = "disable_global", @@ -2196,6 +2862,21 @@ def go_deps(): sum = "h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=", version = "v0.1.5", ) + go_repository( + name = "com_github_sonatard_noctx", + build_file_proto_mode = "disable", + importpath = "github.com/sonatard/noctx", + sum = "h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=", + version = "v0.0.1", + ) + go_repository( + name = "com_github_sourcegraph_go_diff", + build_file_proto_mode = "disable", + importpath = "github.com/sourcegraph/go-diff", + sum = "h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=", + version = "v0.6.1", + ) + go_repository( name = "com_github_spaolacci_murmur3", build_file_proto_mode = "disable_global", @@ -2207,15 +2888,15 @@ def go_deps(): name = "com_github_spf13_afero", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/afero", - sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=", - version = "v1.2.2", + sum = "h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=", + version = "v1.8.2", ) go_repository( name = "com_github_spf13_cast", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/cast", - sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=", - version = "v1.3.0", + sum = "h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=", + version = "v1.4.1", ) go_repository( name = "com_github_spf13_cobra", @@ -2228,8 +2909,8 @@ def go_deps(): name = "com_github_spf13_jwalterweatherman", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/jwalterweatherman", - sum = "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=", - version = "v1.0.0", + sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=", + version = "v1.1.0", ) go_repository( name = "com_github_spf13_pflag", @@ -2242,9 +2923,17 @@ def go_deps(): name = "com_github_spf13_viper", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/viper", - sum = "h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=", - version = "v1.7.0", + sum = "h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=", + version = "v1.11.0", + ) + go_repository( + name = "com_github_ssgreg_nlreturn_v2", + build_file_proto_mode = "disable", + importpath = "github.com/ssgreg/nlreturn/v2", + sum = "h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=", + version = "v2.2.1", ) + go_repository( name = "com_github_stathat_consistent", build_file_proto_mode = "disable", @@ -2252,6 +2941,13 @@ def go_deps(): sum = "h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U=", version = "v1.0.0", ) + go_repository( + name = "com_github_stbenjam_no_sprintf_host_port", + build_file_proto_mode = "disable", + importpath = "github.com/stbenjam/no-sprintf-host-port", + sum = "h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=", + version = "v0.1.1", + ) go_repository( name = "com_github_stretchr_objx", @@ -2274,6 +2970,29 @@ def go_deps(): sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=", version = "v1.2.0", ) + go_repository( + name = "com_github_sylvia7788_contextcheck", + build_file_proto_mode = "disable", + importpath = "github.com/sylvia7788/contextcheck", + sum = "h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=", + version = 
"v1.0.4", + ) + go_repository( + name = "com_github_tdakkota_asciicheck", + build_file_proto_mode = "disable", + importpath = "github.com/tdakkota/asciicheck", + sum = "h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A=", + version = "v0.1.1", + ) + + go_repository( + name = "com_github_tetafro_godot", + build_file_proto_mode = "disable", + importpath = "github.com/tetafro/godot", + sum = "h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=", + version = "v1.4.11", + ) + go_repository( name = "com_github_tiancaiamao_appdash", build_file_proto_mode = "disable_global", @@ -2285,8 +3004,8 @@ def go_deps(): name = "com_github_tikv_client_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/tikv/client-go/v2", - sum = "h1:N5ivsNkDQDgimY0ZVqMnWqXjEnxy5uFChoB4wPIKpPI=", - version = "v2.0.1-0.20220613112734-be31f33ba03b", + sum = "h1:VAyYcN1Nw7RupQszUYqOkueEVapWSxKFU7uBaYY5Dv8=", + version = "v2.0.1-0.20220627063500-947d923945fd", ) go_repository( name = "com_github_tikv_pd_client", @@ -2295,19 +3014,27 @@ def go_deps(): sum = "h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk=", version = "v0.0.0-20220307081149-841fa61e9710", ) + go_repository( + name = "com_github_timakin_bodyclose", + build_file_proto_mode = "disable", + importpath = "github.com/timakin/bodyclose", + sum = "h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=", + version = "v0.0.0-20210704033933-f49887972144", + ) + go_repository( name = "com_github_tklauser_go_sysconf", build_file_proto_mode = "disable_global", importpath = "github.com/tklauser/go-sysconf", - sum = "h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=", - version = "v0.3.9", + sum = "h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=", + version = "v0.3.10", ) go_repository( name = "com_github_tklauser_numcpus", build_file_proto_mode = "disable_global", importpath = "github.com/tklauser/numcpus", - sum = "h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=", - version = "v0.3.0", + sum = "h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=", + version = "v0.4.0", ) go_repository( name = "com_github_tmc_grpc_websocket_proxy", @@ -2316,6 +3043,21 @@ def go_deps(): sum = "h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=", version = "v0.0.0-20201229170055-e5319fda7802", ) + go_repository( + name = "com_github_tomarrell_wrapcheck_v2", + build_file_proto_mode = "disable", + importpath = "github.com/tomarrell/wrapcheck/v2", + sum = "h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw=", + version = "v2.6.1", + ) + go_repository( + name = "com_github_tommy_muehle_go_mnd_v2", + build_file_proto_mode = "disable", + importpath = "github.com/tommy-muehle/go-mnd/v2", + sum = "h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=", + version = "v2.5.0", + ) + go_repository( name = "com_github_twmb_murmur3", build_file_proto_mode = "disable_global", @@ -2351,6 +3093,21 @@ def go_deps(): sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=", version = "v0.0.0-20181204163529-d75b2dcb6bc8", ) + go_repository( + name = "com_github_ultraware_funlen", + build_file_proto_mode = "disable", + importpath = "github.com/ultraware/funlen", + sum = "h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=", + version = "v0.0.3", + ) + go_repository( + name = "com_github_ultraware_whitespace", + build_file_proto_mode = "disable", + importpath = "github.com/ultraware/whitespace", + sum = "h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=", + version = "v0.0.5", + ) + go_repository( name = "com_github_urfave_negroni", build_file_proto_mode = "disable_global", @@ -2358,6 +3115,14 @@ def go_deps(): sum = 
"h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=", version = "v1.0.0", ) + go_repository( + name = "com_github_uudashr_gocognit", + build_file_proto_mode = "disable", + importpath = "github.com/uudashr/gocognit", + sum = "h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=", + version = "v1.0.5", + ) + go_repository( name = "com_github_valyala_bytebufferpool", build_file_proto_mode = "disable_global", @@ -2379,6 +3144,14 @@ def go_deps(): sum = "h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=", version = "v1.0.1", ) + go_repository( + name = "com_github_valyala_quicktemplate", + build_file_proto_mode = "disable", + importpath = "github.com/valyala/quicktemplate", + sum = "h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=", + version = "v1.7.0", + ) + go_repository( name = "com_github_valyala_tcplisten", build_file_proto_mode = "disable_global", @@ -2463,6 +3236,14 @@ def go_deps(): sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=", version = "v0.0.3-0.20170626215501-b2862e3d0a77", ) + go_repository( + name = "com_github_yagipy_maintidx", + build_file_proto_mode = "disable", + importpath = "github.com/yagipy/maintidx", + sum = "h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=", + version = "v1.0.0", + ) + go_repository( name = "com_github_yalp_jsonpath", build_file_proto_mode = "disable_global", @@ -2470,6 +3251,14 @@ def go_deps(): sum = "h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=", version = "v0.0.0-20180802001716-5cc68e5049a0", ) + go_repository( + name = "com_github_yeya24_promlinter", + build_file_proto_mode = "disable", + importpath = "github.com/yeya24/promlinter", + sum = "h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=", + version = "v0.2.0", + ) + go_repository( name = "com_github_yudai_gojsondiff", build_file_proto_mode = "disable_global", @@ -2505,6 +3294,14 @@ def go_deps(): sum = "h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=", version = "v1.2.2", ) + go_repository( + name = "com_gitlab_bosi_decorder", + build_file_proto_mode = "disable", + importpath = "gitlab.com/bosi/decorder", + sum = "h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=", + version = "v0.2.1", + ) + go_repository( name = "com_google_cloud_go", build_file_proto_mode = "disable_global", @@ -2636,8 +3433,8 @@ def go_deps(): name = "in_gopkg_ini_v1", build_file_proto_mode = "disable_global", importpath = "gopkg.in/ini.v1", - sum = "h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=", - version = "v1.66.2", + sum = "h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=", + version = "v1.66.4", ) go_repository( name = "in_gopkg_jcmturner_aescts_v1", @@ -2952,8 +3749,8 @@ def go_deps(): name = "org_golang_google_protobuf", build_file_proto_mode = "disable_global", importpath = "google.golang.org/protobuf", - sum = "h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=", - version = "v1.27.1", + sum = "h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=", + version = "v1.28.0", ) go_repository( name = "org_golang_x_crypto", @@ -2966,8 +3763,8 @@ def go_deps(): name = "org_golang_x_exp", build_file_proto_mode = "disable_global", importpath = "golang.org/x/exp", - sum = "h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=", - version = "v0.0.0-20220426173459-3bcf042a4bf5", + sum = "h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4=", + version = "v0.0.0-20220428152302-39d4317da171", ) go_repository( name = "org_golang_x_exp_typeparams", @@ -3030,8 +3827,8 @@ def go_deps(): name = "org_golang_x_sys", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sys", - sum = "h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw=", - 
version = "v0.0.0-20220408201424-a24fb2fb8a0f", + sum = "h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU=", + version = "v0.0.0-20220622161953-175b2fd9d664", ) go_repository( name = "org_golang_x_term", @@ -3058,15 +3855,15 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sum = "h1:OKYpQQVE3DKSc3r3zHVzq46vq5YH7x8xpR3/k9ixmUg=", - version = "v0.1.11-0.20220513221640-090b14e8501f", + sum = "h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=", + version = "v0.1.11", ) go_repository( name = "org_golang_x_xerrors", build_file_proto_mode = "disable_global", importpath = "golang.org/x/xerrors", - sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=", - version = "v0.0.0-20200804184101-5ec99f83aff1", + sum = "h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=", + version = "v0.0.0-20220411194840-2f41105eb62f", ) go_repository( name = "org_gonum_v1_gonum", diff --git a/Dockerfile b/Dockerfile index eee4529c2a974..99da796860a77 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,6 +32,8 @@ WORKDIR /go/src/github.com/pingcap/tidb # Cache dependencies COPY go.mod . COPY go.sum . +COPY parser/go.mod parser/go.mod +COPY parser/go.sum parser/go.sum RUN GO111MODULE=on go mod download diff --git a/Makefile b/Makefile index 2314bcff43e63..6f45837b2d2b1 100644 --- a/Makefile +++ b/Makefile @@ -327,14 +327,6 @@ build_for_br_integration_test: ) || (make failpoint-disable && exit 1) @make failpoint-disable -build_for_lightning_test: - @make failpoint-enable - $(GOTEST) -c -cover -covermode=count \ - -coverpkg=github.com/pingcap/tidb/br/... \ - -o $(LIGHTNING_BIN).test \ - github.com/pingcap/tidb/br/cmd/tidb-lightning - @make failpoint-disable - br_unit_test: export ARGS=$$($(BR_PACKAGES)) br_unit_test: @make failpoint-enable @@ -448,7 +440,7 @@ bazel_coverage_test: failpoint-enable bazel_ci_prepare bazel_build: bazel_ci_prepare mkdir -p bin - bazel --output_user_root=/home/jenkins/.tidb/tmp build --config=ci //tidb-server/... //br/cmd/... //cmd/... + bazel --output_user_root=/home/jenkins/.tidb/tmp build -k --config=ci //tidb-server/... //br/cmd/... //cmd/... 
--//build:with_nogo_flag=true cp bazel-out/k8-fastbuild/bin/tidb-server/tidb-server_/tidb-server ./bin cp bazel-out/k8-fastbuild/bin/cmd/importer/importer_/importer ./bin cp bazel-out/k8-fastbuild/bin/tidb-server/tidb-server-check_/tidb-server-check ./bin diff --git a/WORKSPACE b/WORKSPACE index 3553ec948fe50..818097c85455f 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -11,10 +11,10 @@ http_archive( http_archive( name = "bazel_gazelle", - sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb", + sha256 = "501deb3d5695ab658e82f6f6f549ba681ea3ca2a5fb7911154b5aa45596183fa", urls = [ - "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz", - "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.26.0/bazel-gazelle-v0.26.0.tar.gz", ], ) diff --git a/bindinfo/BUILD.bazel b/bindinfo/BUILD.bazel index 22cb9e355d99e..299bcc1c6cbee 100644 --- a/bindinfo/BUILD.bazel +++ b/bindinfo/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/pingcap/tidb/bindinfo", visibility = ["//visibility:public"], deps = [ + "//kv", "//metrics", "//parser", "//parser/ast", @@ -19,6 +20,7 @@ go_library( "//parser/mysql", "//parser/terror", "//sessionctx", + "//sessionctx/sessionstates", "//sessionctx/stmtctx", "//sessionctx/variable", "//types", diff --git a/bindinfo/bind_record.go b/bindinfo/bind_record.go index bdb8301befb0f..63517d91ac189 100644 --- a/bindinfo/bind_record.go +++ b/bindinfo/bind_record.go @@ -69,9 +69,9 @@ type Binding struct { Charset string Collation string // Hint is the parsed hints, it is used to bind hints to stmt node. - Hint *hint.HintsSet + Hint *hint.HintsSet `json:"-"` // ID is the string form of Hint. It would be non-empty only when the status is `Using` or `PendingVerify`. - ID string + ID string `json:"-"` } func (b *Binding) isSame(rb *Binding) bool { diff --git a/bindinfo/handle.go b/bindinfo/handle.go index 48781082b6a3c..c6c5fe8677359 100644 --- a/bindinfo/handle.go +++ b/bindinfo/handle.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" @@ -134,9 +135,10 @@ func (h *BindHandle) Update(fullLoad bool) (err error) { exec := h.sctx.Context.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) // No need to acquire the session context lock for ExecRestrictedSQL, it // uses another background session. - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source FROM mysql.bind_info WHERE update_time > %? 
ORDER BY update_time, create_time`, updateTime) if err != nil { @@ -209,20 +211,21 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -239,7 +242,7 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor now := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 3) updateTs := now.String() - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %?`, deleted, updateTs, record.OriginalSQL, updateTs) if err != nil { return err @@ -250,7 +253,7 @@ func (h *BindHandle) CreateBindRecord(sctx sessionctx.Context, record *BindRecor record.Bindings[i].UpdateTime = now // Insert the BindRecord to the storage. - _, err = exec.ExecuteInternal(context.TODO(), `INSERT INTO mysql.bind_info VALUES (%?,%?, %?, %?, %?, %?, %?, %?, %?)`, + _, err = exec.ExecuteInternal(ctx, `INSERT INTO mysql.bind_info VALUES (%?,%?, %?, %?, %?, %?, %?, %?, %?)`, record.OriginalSQL, record.Bindings[i].BindSQL, record.Db, @@ -296,20 +299,21 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -322,7 +326,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) return err } if duplicateBinding != nil { - _, err = exec.ExecuteInternal(context.TODO(), `DELETE FROM mysql.bind_info WHERE original_sql = %? AND bind_sql = %?`, record.OriginalSQL, duplicateBinding.BindSQL) + _, err = exec.ExecuteInternal(ctx, `DELETE FROM mysql.bind_info WHERE original_sql = %? AND bind_sql = %?`, record.OriginalSQL, duplicateBinding.BindSQL) if err != nil { return err } @@ -338,7 +342,7 @@ func (h *BindHandle) AddBindRecord(sctx sessionctx.Context, record *BindRecord) record.Bindings[i].UpdateTime = now // Insert the BindRecord to the storage. 
- _, err = exec.ExecuteInternal(context.TODO(), `INSERT INTO mysql.bind_info VALUES (%?, %?, %?, %?, %?, %?, %?, %?, %?)`, + _, err = exec.ExecuteInternal(ctx, `INSERT INTO mysql.bind_info VALUES (%?, %?, %?, %?, %?, %?, %?, %?, %?)`, record.OriginalSQL, record.Bindings[i].BindSQL, record.Db, @@ -365,20 +369,21 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return err } var deleteRows int defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil || deleteRows == 0 { return } @@ -398,10 +403,10 @@ func (h *BindHandle) DropBindRecord(originalSQL, db string, binding *Binding) (e updateTs := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeTimestamp, 3).String() if binding == nil { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status != %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status != %?`, deleted, updateTs, originalSQL, updateTs, deleted) } else { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? and status != %?`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? and status != %?`, deleted, updateTs, originalSQL, updateTs, binding.BindSQL, deleted) } @@ -417,8 +422,9 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n h.sctx.Unlock() h.bindInfo.Unlock() }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return } @@ -439,12 +445,12 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -485,10 +491,10 @@ func (h *BindHandle) SetBindRecordStatus(originalSQL string, binding *Binding, n updateTsStr := updateTs.String() if binding == nil { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND status IN (%?, %?)`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? 
AND status IN (%?, %?)`, newStatus, updateTsStr, originalSQL, updateTsStr, oldStatus0, oldStatus1) } else { - _, err = exec.ExecuteInternal(context.TODO(), `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? AND status IN (%?, %?)`, + _, err = exec.ExecuteInternal(ctx, `UPDATE mysql.bind_info SET status = %?, update_time = %? WHERE original_sql = %? AND update_time < %? AND bind_sql = %? AND status IN (%?, %?)`, newStatus, updateTsStr, originalSQL, updateTsStr, binding.BindSQL, oldStatus0, oldStatus1) } affectRows = int(h.sctx.Context.GetSessionVars().StmtCtx.AffectedRows()) @@ -504,18 +510,19 @@ func (h *BindHandle) GCBindRecord() (err error) { h.bindInfo.Unlock() }() exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.TODO(), "BEGIN PESSIMISTIC") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + _, err = exec.ExecuteInternal(ctx, "BEGIN PESSIMISTIC") if err != nil { return err } defer func() { if err != nil { - _, err1 := exec.ExecuteInternal(context.TODO(), "ROLLBACK") + _, err1 := exec.ExecuteInternal(ctx, "ROLLBACK") terror.Log(err1) return } - _, err = exec.ExecuteInternal(context.TODO(), "COMMIT") + _, err = exec.ExecuteInternal(ctx, "COMMIT") if err != nil { return } @@ -530,7 +537,7 @@ func (h *BindHandle) GCBindRecord() (err error) { // we only garbage collect those records with update_time before 10 leases. updateTime := time.Now().Add(-(10 * Lease)) updateTimeStr := types.NewTime(types.FromGoTime(updateTime), mysql.TypeTimestamp, 3).String() - _, err = exec.ExecuteInternal(context.TODO(), `DELETE FROM mysql.bind_info WHERE status = 'deleted' and update_time < %?`, updateTimeStr) + _, err = exec.ExecuteInternal(ctx, `DELETE FROM mysql.bind_info WHERE status = 'deleted' and update_time < %?`, updateTimeStr) return err } @@ -542,8 +549,9 @@ func (h *BindHandle) GCBindRecord() (err error) { // even if they come from different tidb instances. func (h *BindHandle) lockBindInfoTable() error { // h.sctx already locked. + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) exec, _ := h.sctx.Context.(sqlexec.SQLExecutor) - _, err := exec.ExecuteInternal(context.TODO(), h.LockBindInfoSQL()) + _, err := exec.ExecuteInternal(ctx, h.LockBindInfoSQL()) return err } @@ -790,9 +798,10 @@ func (h *BindHandle) extractCaptureFilterFromStorage() (filter *captureFilter) { users: make(map[string]struct{}), } exec := h.sctx.Context.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) // No need to acquire the session context lock for ExecRestrictedSQL, it // uses another background session. - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT filter_type, filter_value FROM mysql.capture_plan_baselines_blacklist order by filter_type`) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT filter_type, filter_value FROM mysql.capture_plan_baselines_blacklist order by filter_type`) if err != nil { logutil.BgLogger().Warn("[sql-bind] failed to load mysql.capture_plan_baselines_blacklist", zap.Error(err)) return @@ -898,7 +907,8 @@ func getHintsForSQL(sctx sessionctx.Context, sql string) (string, error) { // Usually passing a sprintf to ExecuteInternal is not recommended, but in this case // it is safe because ExecuteInternal does not permit MultiStatement execution. Thus, // the statement won't be able to "break out" from EXPLAIN. 
- rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), fmt.Sprintf("EXPLAIN FORMAT='hint' %s", sql)) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, fmt.Sprintf("EXPLAIN FORMAT='hint' %s", sql)) sctx.GetSessionVars().UsePlanBaselines = origVals if rs != nil { defer func() { @@ -1018,9 +1028,10 @@ func (h *BindHandle) SaveEvolveTasksToStore() { h.pendingVerifyBindRecordMap.flushToStore() } -func getEvolveParameters(ctx sessionctx.Context) (time.Duration, time.Time, time.Time, error) { - rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL( - context.TODO(), +func getEvolveParameters(sctx sessionctx.Context) (time.Duration, time.Time, time.Time, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) + rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL( + ctx, nil, "SELECT variable_name, variable_value FROM mysql.global_variables WHERE variable_name IN (%?, %?, %?)", variable.TiDBEvolvePlanTaskMaxTime, @@ -1093,7 +1104,7 @@ func (h *BindHandle) getOnePendingVerifyJob() (string, string, Binding) { } func (h *BindHandle) getRunningDuration(sctx sessionctx.Context, db, sql string, maxTime time.Duration) (time.Duration, error) { - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBindInfo) if db != "" { _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "use %n", db) if err != nil { diff --git a/bindinfo/session_handle.go b/bindinfo/session_handle.go index 742a45473c096..e6baebe3ea960 100644 --- a/bindinfo/session_handle.go +++ b/bindinfo/session_handle.go @@ -15,6 +15,8 @@ package bindinfo import ( + "context" + "encoding/json" "strings" "time" @@ -22,7 +24,9 @@ import ( "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/sessionstates" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" "go.uber.org/zap" ) @@ -104,6 +108,39 @@ func (h *SessionHandle) GetAllBindRecord() (bindRecords []*BindRecord) { return h.ch.GetAllBindRecords() } +// EncodeSessionStates implements SessionStatesHandler.EncodeSessionStates interface. +func (h *SessionHandle) EncodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + bindRecords := h.ch.GetAllBindRecords() + if len(bindRecords) == 0 { + return nil + } + bytes, err := json.Marshal(bindRecords) + if err != nil { + return err + } + sessionStates.Bindings = string(hack.String(bytes)) + return nil +} + +// DecodeSessionStates implements SessionStatesHandler.DecodeSessionStates interface. +func (h *SessionHandle) DecodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + if len(sessionStates.Bindings) == 0 { + return nil + } + var records []*BindRecord + if err := json.Unmarshal(hack.Slice(sessionStates.Bindings), &records); err != nil { + return err + } + for _, record := range records { + // Restore hints and ID because hints are hard to encode. + if err := record.prepareHints(sctx); err != nil { + return err + } + h.appendBindRecord(parser.DigestNormalized(record.OriginalSQL).String(), record) + } + return nil +} + // Close closes the session handle. 
func (h *SessionHandle) Close() { for _, bindRecord := range h.ch.GetAllBindRecords() { diff --git a/br/cmd/tidb-lightning-ctl/main.go b/br/cmd/tidb-lightning-ctl/main.go index 08f0c080963f3..4dc70af929083 100644 --- a/br/cmd/tidb-lightning-ctl/main.go +++ b/br/cmd/tidb-lightning-ctl/main.go @@ -155,6 +155,7 @@ func checkpointErrorIgnore(ctx context.Context, cfg *config.Config, tableName st if err != nil { return errors.Trace(err) } + //nolint: errcheck defer cpdb.Close() return errors.Trace(cpdb.IgnoreErrorCheckpoint(ctx, tableName)) @@ -165,6 +166,7 @@ func checkpointErrorDestroy(ctx context.Context, cfg *config.Config, tls *common if err != nil { return errors.Trace(err) } + //nolint: errcheck defer cpdb.Close() target, err := restore.NewTiDBManager(ctx, cfg.TiDB, tls) @@ -222,6 +224,7 @@ func checkpointDump(ctx context.Context, cfg *config.Config, dumpFolder string) if err != nil { return errors.Trace(err) } + //nolint: errcheck defer cpdb.Close() if err := os.MkdirAll(dumpFolder, 0o750); err != nil { @@ -288,6 +291,7 @@ func getLocalStoringTables(ctx context.Context, cfg *config.Config) (err2 error) if err != nil { return errors.Trace(err) } + //nolint: errcheck defer cpdb.Close() tableWithEngine, err := cpdb.GetLocalStoringTables(ctx) diff --git a/br/pkg/conn/conn.go b/br/pkg/conn/conn.go index 48d45e6070ffa..75eef2c1555ab 100755 --- a/br/pkg/conn/conn.go +++ b/br/pkg/conn/conn.go @@ -454,7 +454,7 @@ func (mgr *Mgr) Close() { mgr.dom.Close() } tikv.StoreShuttingDown(1) - mgr.storage.Close() + _ = mgr.storage.Close() } mgr.PdController.Close() diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index 150be18171d2b..48d5b05b56a63 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -120,6 +120,7 @@ func (gs *tidbSession) Execute(ctx context.Context, sql string) error { } func (gs *tidbSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) rs, err := gs.se.ExecuteInternal(ctx, sql, args...) if err != nil { return errors.Trace(err) @@ -129,6 +130,7 @@ func (gs *tidbSession) ExecuteInternal(ctx context.Context, sql string, args ... // At least call `next` once for triggering theirs side effect. // (Maybe we'd better drain all returned rows?) 
if rs != nil { + //nolint: errcheck defer rs.Close() c := rs.NewChunk(nil) if err := rs.Next(ctx, c); err != nil { diff --git a/br/pkg/lightning/BUILD.bazel b/br/pkg/lightning/BUILD.bazel index 1cafc9ec5fbef..99d534762fc69 100644 --- a/br/pkg/lightning/BUILD.bazel +++ b/br/pkg/lightning/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "@com_github_prometheus_client_golang//prometheus/collectors", "@com_github_prometheus_client_golang//prometheus/promhttp", "@com_github_shurcool_httpgzip//:httpgzip", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", "@org_uber_go_zap//zapcore", ], diff --git a/br/pkg/lightning/backend/BUILD.bazel b/br/pkg/lightning/backend/BUILD.bazel index 87239db5e1442..225c39df4c10b 100644 --- a/br/pkg/lightning/backend/BUILD.bazel +++ b/br/pkg/lightning/backend/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/br/pkg/lightning/backend/backend.go b/br/pkg/lightning/backend/backend.go index fcfbb60a5aec7..f8b3e79132aa9 100644 --- a/br/pkg/lightning/backend/backend.go +++ b/br/pkg/lightning/backend/backend.go @@ -17,7 +17,6 @@ package backend import ( "context" "fmt" - "sort" "time" "github.com/google/uuid" @@ -33,6 +32,7 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/table" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -290,12 +290,11 @@ func (be Backend) CheckDiskQuota(quota int64) ( totalMemSize int64, ) { sizes := be.abstract.EngineFileSizes() - sort.Slice(sizes, func(i, j int) bool { - a, b := &sizes[i], &sizes[j] - if a.IsImporting != b.IsImporting { - return a.IsImporting + slices.SortFunc(sizes, func(i, j EngineFileSize) bool { + if i.IsImporting != j.IsImporting { + return i.IsImporting } - return a.DiskSize+a.MemSize < b.DiskSize+b.MemSize + return i.DiskSize+i.MemSize < j.DiskSize+j.MemSize }) for _, size := range sizes { totalDiskSize += size.DiskSize diff --git a/br/pkg/lightning/backend/kv/BUILD.bazel b/br/pkg/lightning/backend/kv/BUILD.bazel index bbf86ff484d9e..f0b8c5545c330 100644 --- a/br/pkg/lightning/backend/kv/BUILD.bazel +++ b/br/pkg/lightning/backend/kv/BUILD.bazel @@ -37,6 +37,7 @@ go_library( "//util/topsql/stmtstats", "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", "@org_uber_go_zap//zapcore", ], diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go index bd13f27e38954..66be51a19ec5e 100644 --- a/br/pkg/lightning/backend/kv/sql2kv.go +++ b/br/pkg/lightning/backend/kv/sql2kv.go @@ -21,7 +21,6 @@ import ( "fmt" "math" "math/rand" - "sort" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/lightning/common" @@ -33,7 +32,9 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/mysql" //nolint: goimports + // Import tidb/planner/core to initialize expression.RewriteAstExpr + _ "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" @@ -42,9 +43,7 @@ import ( "github.com/pingcap/tidb/util/chunk" "go.uber.org/zap" "go.uber.org/zap/zapcore" - - // Import tidb/planner/core to initialize expression.RewriteAstExpr - _ "github.com/pingcap/tidb/planner/core" + 
"golang.org/x/exp/slices" ) var ExtraHandleColumnInfo = model.NewExtraHandleColInfo() @@ -189,8 +188,8 @@ func collectGeneratedColumns(se *session, meta *model.TableInfo, cols []*table.C } // order the result by column offset so they match the evaluation order. - sort.Slice(genCols, func(i, j int) bool { - return cols[genCols[i].index].Offset < cols[genCols[j].index].Offset + slices.SortFunc(genCols, func(i, j genCol) bool { + return cols[i.index].Offset < cols[j.index].Offset }) return genCols, nil } @@ -450,49 +449,6 @@ func isPKCol(colInfo *model.ColumnInfo) bool { return mysql.HasPriKeyFlag(colInfo.GetFlag()) } -func isRowIDOverflow(meta *model.ColumnInfo, rowID int64) bool { - isUnsigned := mysql.HasUnsignedFlag(meta.GetFlag()) - switch meta.GetType() { - // MEDIUM INT - case mysql.TypeInt24: - if !isUnsigned { - return rowID > mysql.MaxInt24 - } - return rowID > mysql.MaxUint24 - // INT - case mysql.TypeLong: - if !isUnsigned { - return rowID > math.MaxInt32 - } - return rowID > math.MaxUint32 - // SMALLINT - case mysql.TypeShort: - if !isUnsigned { - return rowID > math.MaxInt16 - } - return rowID > math.MaxUint16 - // TINYINT - case mysql.TypeTiny: - if !isUnsigned { - return rowID > math.MaxInt8 - } - return rowID > math.MaxUint8 - // FLOAT - case mysql.TypeFloat: - if !isUnsigned { - return float32(rowID) > math.MaxFloat32 - } - return float64(rowID) > math.MaxFloat32*2 - // DOUBLE - case mysql.TypeDouble: - if !isUnsigned { - return float64(rowID) > math.MaxFloat64 - } - // impossible for rowID exceeding MaxFloat64 - } - return false -} - func (kvcodec *tableKVEncoder) getActualDatum(rowID int64, colIndex int, inputDatum *types.Datum) (types.Datum, error) { var ( value types.Datum @@ -520,11 +476,6 @@ func (kvcodec *tableKVEncoder) getActualDatum(rowID int64, colIndex int, inputDa // handle special values switch { case isAutoIncCol(col.ToInfo()): - // rowID is going to auto-filled the omitted column, - // which should be checked before restore - if isRowIDOverflow(col.ToInfo(), rowID) { - return value, errors.Errorf("PK %d is out of range", rowID) - } // we still need a conversion, e.g. to catch overflow with a TINYINT column. value, err = table.CastValue(kvcodec.se, types.NewIntDatum(rowID), col.ToInfo(), false, false) case isTableAutoRandom(tblMeta) && isPKCol(col.ToInfo()): diff --git a/br/pkg/lightning/backend/local/BUILD.bazel b/br/pkg/lightning/backend/local/BUILD.bazel index 4b3102aca7f34..02358eb492d32 100644 --- a/br/pkg/lightning/backend/local/BUILD.bazel +++ b/br/pkg/lightning/backend/local/BUILD.bazel @@ -69,6 +69,7 @@ go_library( "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", + "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", "@org_golang_x_time//rate", "@org_uber_go_atomic//:atomic", diff --git a/br/pkg/lightning/backend/local/duplicate.go b/br/pkg/lightning/backend/local/duplicate.go index be446f81b9ba4..b44a6c680f670 100644 --- a/br/pkg/lightning/backend/local/duplicate.go +++ b/br/pkg/lightning/backend/local/duplicate.go @@ -434,6 +434,7 @@ func NewDuplicateManager( // RecordDataConflictError records data conflicts to errorMgr. The key received from stream must be a row key. 
func (m *DuplicateManager) RecordDataConflictError(ctx context.Context, stream DupKVStream) error { + //nolint: errcheck defer stream.Close() var dataConflictInfos []errormanager.DataConflictInfo for { @@ -498,6 +499,7 @@ func (m *DuplicateManager) saveIndexHandles(ctx context.Context, handles pending // RecordIndexConflictError records index conflicts to errorMgr. The key received from stream must be an index key. func (m *DuplicateManager) RecordIndexConflictError(ctx context.Context, stream DupKVStream, tableID int64, indexInfo *model.IndexInfo) error { + //nolint: errcheck defer stream.Close() indexHandles := makePendingIndexHandlesWithCapacity(0) for { diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go index 04036e57b16ac..90254f3332fd0 100644 --- a/br/pkg/lightning/backend/local/engine.go +++ b/br/pkg/lightning/backend/local/engine.go @@ -24,7 +24,6 @@ import ( "io" "os" "path/filepath" - "sort" "sync" "time" @@ -33,7 +32,6 @@ import ( "github.com/google/btree" "github.com/google/uuid" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" @@ -46,6 +44,7 @@ import ( "github.com/pingcap/tidb/util/hack" "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" ) @@ -733,8 +732,8 @@ func (e *Engine) batchIngestSSTs(metas []*sstMeta) error { if len(metas) == 0 { return nil } - sort.Slice(metas, func(i, j int) bool { - return bytes.Compare(metas[i].minKey, metas[j].minKey) < 0 + slices.SortFunc(metas, func(i, j *sstMeta) bool { + return bytes.Compare(i.minKey, j.minKey) < 0 }) metaLevels := make([][]*sstMeta, 0) @@ -877,6 +876,7 @@ func (e *Engine) loadEngineMeta() error { } return err } + //nolint: errcheck defer closer.Close() if err = json.Unmarshal(jsonBytes, &e.engineMeta); err != nil { @@ -894,8 +894,8 @@ func sortAndMergeRanges(ranges []Range) []Range { return ranges } - sort.Slice(ranges, func(i, j int) bool { - return bytes.Compare(ranges[i].start, ranges[j].start) < 0 + slices.SortFunc(ranges, func(i, j Range) bool { + return bytes.Compare(i.start, j.start) < 0 }) curEnd := ranges[0].end @@ -1003,21 +1003,6 @@ type Writer struct { batchSize int64 lastMetaSeq int32 - prevRowID int64 // only used for appendRowsSorted -} - -func (w *Writer) flushAndNewWriter() error { - var err error - err = w.flush(context.Background()) - if err != nil { - return errors.Trace(err) - } - newWriter, err := w.createSSTWriter() - if err != nil { - return errors.Trace(err) - } - w.writer = newWriter - return nil } func (w *Writer) appendRowsSorted(kvs []common.KvPair) error { @@ -1028,17 +1013,6 @@ func (w *Writer) appendRowsSorted(kvs []common.KvPair) error { } w.writer = writer } - if len(kvs) == 0 { - return nil - } - if w.prevRowID != 0 && kvs[0].RowID > w.prevRowID+1 { - // rowID leap. 
probably re-alloc id - // should write to different sst - err := w.flushAndNewWriter() - if err != nil { - return err - } - } keyAdapter := w.engine.keyAdapter totalKeySize := 0 @@ -1063,26 +1037,7 @@ func (w *Writer) appendRowsSorted(kvs []common.KvPair) error { } kvs = newKvs } - startIdx := 0 - w.prevRowID = kvs[len(kvs)-1].RowID - for i := 1; i < len(kvs); i++ { - if kvs[i].RowID > kvs[i-1].RowID+1 { - // leap id - err := w.writer.writeKVs(kvs[startIdx:i]) - if err != nil { - return err - } - err = w.flushAndNewWriter() - if err != nil { - return err - } - startIdx = i - } - } - if startIdx < len(kvs) { - return w.writer.writeKVs(kvs[startIdx:]) - } - return nil + return w.writer.writeKVs(kvs) } func (w *Writer) appendRowsUnsorted(ctx context.Context, kvs []common.KvPair) error { @@ -1149,9 +1104,6 @@ func (w *Writer) AppendRows(ctx context.Context, tableName string, columnNames [ } func (w *Writer) flush(ctx context.Context) error { - failpoint.Inject("MockFlushWriter", func() { - failpoint.Return(nil) - }) w.Lock() defer w.Unlock() if w.batchCount == 0 { @@ -1209,8 +1161,8 @@ func (w *Writer) flushKVs(ctx context.Context) error { return errors.Trace(err) } if !w.isWriteBatchSorted { - sort.Slice(w.writeBatch[:w.batchCount], func(i, j int) bool { - return bytes.Compare(w.writeBatch[i].Key, w.writeBatch[j].Key) < 0 + slices.SortFunc(w.writeBatch[:w.batchCount], func(i, j common.KvPair) bool { + return bytes.Compare(i.Key, j.Key) < 0 }) w.isWriteBatchSorted = true } diff --git a/br/pkg/lightning/backend/local/engine_test.go b/br/pkg/lightning/backend/local/engine_test.go index 13c890c028297..c7ffe04b95285 100644 --- a/br/pkg/lightning/backend/local/engine_test.go +++ b/br/pkg/lightning/backend/local/engine_test.go @@ -26,12 +26,9 @@ import ( "github.com/cockroachdb/pebble" "github.com/cockroachdb/pebble/sstable" "github.com/google/uuid" + "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/stretchr/testify/require" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/br/pkg/lightning/backend" - "github.com/pingcap/tidb/br/pkg/lightning/common" ) func TestIngestSSTWithClosedEngine(t *testing.T) { @@ -87,112 +84,3 @@ func TestIngestSSTWithClosedEngine(t *testing.T) { }, }), errorEngineClosed) } - -func TestAutoSplitSST(t *testing.T) { - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/MockFlushWriter", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/MockFlushWriter")) - }() - var err error - dir := os.TempDir() - w := &Writer{ - engine: &Engine{ - sstDir: dir, - keyAdapter: noopKeyAdapter{}, - logger: log.L(), - }, - isKVSorted: true, - isWriteBatchSorted: true, - } - w.engine.closed.Store(false) - w.writer, err = w.createSSTWriter() - require.Nil(t, err) - kvs := []common.KvPair{ - { - Key: []byte("1"), - Val: []byte("val1"), - RowID: 1, - }, - { - Key: []byte("2"), - Val: []byte("val1"), - RowID: 2, - }, - } - prevWriter := w.writer - err = w.appendRowsSorted(kvs) - require.Nil(t, err) - require.True(t, prevWriter == w.writer) - kvs = []common.KvPair{ - { - Key: []byte("10"), - Val: []byte("val10"), - RowID: 10, - }, - { - Key: []byte("11"), - Val: []byte("val11"), - RowID: 11, - }, - } - err = w.appendRowsSorted(kvs) - require.Nil(t, err) - require.False(t, prevWriter == w.writer) // id leap, should flush and create - prevWriter = w.writer - kvs = []common.KvPair{ - { - Key: []byte("12"), - Val: 
[]byte("val12"), - RowID: 10, - }, - { - Key: []byte("13"), - Val: []byte("val13"), - RowID: 11, - }, - { - Key: []byte("15"), - Val: []byte("val15"), - RowID: 15, - }, - } - err = w.appendRowsSorted(kvs) - require.Nil(t, err) - require.False(t, prevWriter == w.writer) // id leap, should flush and create - prevWriter = w.writer - kvs = []common.KvPair{ - { - Key: []byte("16"), - Val: []byte("val16"), - RowID: 16, - }, - { - Key: []byte("17"), - Val: []byte("val17"), - RowID: 17, - }, - { - Key: []byte("19"), - Val: []byte("val19"), - RowID: 19, - }, - { - Key: []byte("20"), - Val: []byte("val20"), - RowID: 20, - }, - { - Key: []byte("22"), - Val: []byte("val22"), - RowID: 22, - }, - { - Key: []byte("23"), - Val: []byte("val23"), - RowID: 22, - }, - } - err = w.appendRowsSorted(kvs) - require.Nil(t, err) - require.False(t, prevWriter == w.writer) // id leap, should flush and create -} diff --git a/br/pkg/lightning/backend/local/iterator.go b/br/pkg/lightning/backend/local/iterator.go index 16c9b647e4f24..e2cb3a447cfbb 100644 --- a/br/pkg/lightning/backend/local/iterator.go +++ b/br/pkg/lightning/backend/local/iterator.go @@ -181,7 +181,7 @@ func (d *dupDetectIter) Close() error { if d.err == nil { d.flush() } - d.writeBatch.Close() + _ = d.writeBatch.Close() return d.iter.Close() } diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 8a45472dddfcc..d5d9e9bea7b5c 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -478,7 +478,7 @@ func (local *local) Close() { local.engines = sync.Map{} for _, engine := range allEngines { - engine.Close() + _ = engine.Close() engine.unlock() } @@ -520,8 +520,7 @@ func (local *local) Close() { local.logger.Warn("remove local db file failed", zap.Error(err)) } } - - local.tikvCli.Close() + _ = local.tikvCli.Close() local.pdCtl.Close() } @@ -763,6 +762,7 @@ func (local *local) WriteToTiKV( regionRange := intersectRange(region.Region, Range{start: start, end: end}) opt := &pebble.IterOptions{LowerBound: regionRange.start, UpperBound: regionRange.end} iter := engine.newKVIter(ctx, opt) + //nolint: errcheck defer iter.Close() stats := rangeStats{} @@ -1020,6 +1020,7 @@ func splitRangeBySizeProps(fullRange Range, sizeProps *sizeProperties, sizeLimit func (local *local) readAndSplitIntoRange(ctx context.Context, engine *Engine, regionSplitSize int64, regionSplitKeys int64) ([]Range, error) { iter := engine.newKVIter(ctx, &pebble.IterOptions{}) + //nolint: errcheck defer iter.Close() iterError := func(e string) error { @@ -1082,6 +1083,7 @@ func (local *local) writeAndIngestByRange( } iter := engine.newKVIter(ctxt, ito) + //nolint: errcheck defer iter.Close() // Needs seek to first because NewIter returns an iterator that is unpositioned hasKey := iter.First() @@ -1504,7 +1506,6 @@ func (local *local) ResolveDuplicateRows(ctx context.Context, tbl table.Table, t logger.Warn("[resolve-dupe] skipping resolution due to selected algorithm. 
this table will become inconsistent!", zap.Stringer("algorithm", algorithm)) return nil case config.DupeResAlgRemove: - break default: panic(fmt.Sprintf("[resolve-dupe] unknown resolution algorithm %v", algorithm)) } diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index 1672b5f212436..ae736eaaabd37 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -39,6 +39,7 @@ import ( "github.com/pingcap/tidb/util/mathutil" "go.uber.org/multierr" "go.uber.org/zap" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" ) @@ -220,8 +221,8 @@ func (local *local) SplitAndScatterRegionByRanges( var err1 error region := sp.region keys := sp.keys - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 + slices.SortFunc(keys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 }) splitRegion := region startIdx := 0 @@ -264,8 +265,8 @@ func (local *local) SplitAndScatterRegionByRanges( log.FromContext(ctx).Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id), zap.Int("keys", endIdx-startIdx), zap.Binary("firstKey", keys[startIdx]), zap.Binary("end", keys[endIdx-1])) - sort.Slice(newRegions, func(i, j int) bool { - return bytes.Compare(newRegions[i].Region.StartKey, newRegions[j].Region.StartKey) < 0 + slices.SortFunc(newRegions, func(i, j *split.RegionInfo) bool { + return bytes.Compare(i.Region.StartKey, j.Region.StartKey) < 0 }) syncLock.Lock() scatterRegions = append(scatterRegions, newRegions...) @@ -319,8 +320,8 @@ func (local *local) SplitAndScatterRegionByRanges( if len(retryKeys) == 0 { break } else { - sort.Slice(retryKeys, func(i, j int) bool { - return bytes.Compare(retryKeys[i], retryKeys[j]) < 0 + slices.SortFunc(retryKeys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 }) minKey = codec.EncodeBytes([]byte{}, retryKeys[0]) maxKey = codec.EncodeBytes([]byte{}, nextKey(retryKeys[len(retryKeys)-1])) @@ -359,7 +360,7 @@ func fetchTableRegionSizeStats(ctx context.Context, db *sql.DB, tableID int64) ( if err != nil { return errors.Trace(err) } - + //nolint: errcheck defer rows.Close() var ( regionID uint64 diff --git a/br/pkg/lightning/checkpoints/BUILD.bazel b/br/pkg/lightning/checkpoints/BUILD.bazel index 241ec95201e76..35d6b216b7d49 100644 --- a/br/pkg/lightning/checkpoints/BUILD.bazel +++ b/br/pkg/lightning/checkpoints/BUILD.bazel @@ -26,6 +26,7 @@ go_library( "//util/sqlexec", "@com_github_joho_sqltocsv//:sqltocsv", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/br/pkg/lightning/checkpoints/checkpoints.go b/br/pkg/lightning/checkpoints/checkpoints.go index 30ab72b0298f0..5ac74aeac86d2 100644 --- a/br/pkg/lightning/checkpoints/checkpoints.go +++ b/br/pkg/lightning/checkpoints/checkpoints.go @@ -38,6 +38,7 @@ import ( "github.com/pingcap/tidb/br/pkg/version/build" "github.com/pingcap/tidb/util/mathutil" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type CheckpointStatus uint8 @@ -522,7 +523,7 @@ func OpenCheckpointsDB(ctx context.Context, cfg *config.Config) (DB, error) { } cpdb, err := NewMySQLCheckpointsDB(ctx, db, cfg.Checkpoint.Schema) if err != nil { - db.Close() + _ = db.Close() return nil, errors.Trace(err) } return cpdb, nil @@ -549,12 +550,14 @@ func IsCheckpointsDBExists(ctx context.Context, cfg *config.Config) (bool, error if err != nil { return false, errors.Trace(err) } + //nolint: errcheck defer db.Close() 
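// A reading of the pattern in this file and elsewhere in the diff (not a stated
// project rule): deferred Close calls whose errors cannot usefully be handled get a
// preceding "//nolint: errcheck" directive, while non-deferred Close calls are
// rewritten as "_ = x.Close()" so the discarded error is explicit to the linter.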
checkSQL := "SHOW DATABASES WHERE `DATABASE` = ?" rows, err := db.QueryContext(ctx, checkSQL, cfg.Checkpoint.Schema) if err != nil { return false, errors.Trace(err) } + //nolint: errcheck defer rows.Close() result := rows.Next() if err := rows.Err(); err != nil { @@ -664,6 +667,7 @@ func (cpdb *MySQLCheckpointsDB) Initialize(ctx context.Context, cfg *config.Conf if err != nil { return errors.Trace(err) } + //nolint: errcheck defer taskStmt.Close() _, err = taskStmt.ExecContext(ctx, cfg.TaskID, cfg.Mydumper.SourceDir, cfg.TikvImporter.Backend, cfg.TikvImporter.Addr, cfg.TiDB.Host, cfg.TiDB.Port, cfg.TiDB.PdAddr, cfg.TikvImporter.SortedKVDir, @@ -682,6 +686,7 @@ func (cpdb *MySQLCheckpointsDB) Initialize(ctx context.Context, cfg *config.Conf if err != nil { return errors.Trace(err) } + //nolint: errcheck defer stmt.Close() for _, db := range dbInfo { @@ -745,6 +750,7 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab if err != nil { return errors.Trace(err) } + //nolint: errcheck defer engineRows.Close() for engineRows.Next() { var ( @@ -769,6 +775,7 @@ func (cpdb *MySQLCheckpointsDB) Get(ctx context.Context, tableName string) (*Tab if err != nil { return errors.Trace(err) } + //nolint: errcheck defer chunkRows.Close() for chunkRows.Next() { var ( @@ -831,12 +838,14 @@ func (cpdb *MySQLCheckpointsDB) InsertEngineCheckpoints(ctx context.Context, tab if err != nil { return errors.Trace(err) } + //nolint: errcheck defer engineStmt.Close() chunkStmt, err := tx.PrepareContext(c, fmt.Sprintf(ReplaceChunkTemplate, cpdb.schema, CheckpointTableNameChunk)) if err != nil { return errors.Trace(err) } + //nolint: errcheck defer chunkStmt.Close() for engineID, engine := range checkpoints { @@ -883,26 +892,31 @@ func (cpdb *MySQLCheckpointsDB) Update(taskCtx context.Context, checkpointDiffs if e != nil { return errors.Trace(e) } + //nolint: errcheck defer chunkStmt.Close() rebaseStmt, e := tx.PrepareContext(c, rebaseQuery) if e != nil { return errors.Trace(e) } + //nolint: errcheck defer rebaseStmt.Close() tableStatusStmt, e := tx.PrepareContext(c, tableStatusQuery) if e != nil { return errors.Trace(e) } + //nolint: errcheck defer tableStatusStmt.Close() tableChecksumStmt, e := tx.PrepareContext(c, tableChecksumQuery) if e != nil { return errors.Trace(e) } + //nolint: errcheck defer tableChecksumStmt.Close() engineStatusStmt, e := tx.PrepareContext(c, engineStatusQuery) if e != nil { return errors.Trace(e) } + //nolint: errcheck defer engineStatusStmt.Close() for tableName, cpd := range checkpointDiffs { if cpd.hasStatus { @@ -1204,8 +1218,8 @@ func (cpdb *FileCheckpointsDB) Get(_ context.Context, tableName string) (*TableC }) } - sort.Slice(engine.Chunks, func(i, j int) bool { - return engine.Chunks[i].Key.less(&engine.Chunks[j].Key) + slices.SortFunc(engine.Chunks, func(i, j *ChunkCheckpoint) bool { + return i.Key.less(&j.Key) }) cp.Engines[engineID] = engine @@ -1408,6 +1422,7 @@ func (cpdb *MySQLCheckpointsDB) GetLocalStoringTables(ctx context.Context) (map[ if err != nil { return errors.Trace(err) } + //nolint: errcheck defer rows.Close() for rows.Next() { var ( @@ -1519,6 +1534,7 @@ func (cpdb *MySQLCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tabl if e != nil { return errors.Trace(e) } + //nolint: errcheck defer rows.Close() for rows.Next() { var dtc DestroyedTableCheckpoint @@ -1566,6 +1582,7 @@ func (cpdb *MySQLCheckpointsDB) DumpTables(ctx context.Context, writer io.Writer if err != nil { return errors.Trace(err) } + //nolint: errcheck defer 
rows.Close() return errors.Trace(sqltocsv.Write(writer, rows)) @@ -1585,6 +1602,7 @@ func (cpdb *MySQLCheckpointsDB) DumpEngines(ctx context.Context, writer io.Write if err != nil { return errors.Trace(err) } + //nolint: errcheck defer rows.Close() return errors.Trace(sqltocsv.Write(writer, rows)) @@ -1616,6 +1634,7 @@ func (cpdb *MySQLCheckpointsDB) DumpChunks(ctx context.Context, writer io.Writer if err != nil { return errors.Trace(err) } + //nolint: errcheck defer rows.Close() return errors.Trace(sqltocsv.Write(writer, rows)) diff --git a/br/pkg/lightning/checkpoints/glue_checkpoint.go b/br/pkg/lightning/checkpoints/glue_checkpoint.go index b0f5278c7e89a..dcb14fcf1d6cc 100644 --- a/br/pkg/lightning/checkpoints/glue_checkpoint.go +++ b/br/pkg/lightning/checkpoints/glue_checkpoint.go @@ -192,6 +192,7 @@ func (g GlueCheckpointsDB) TaskCheckpoint(ctx context.Context) (*TaskCheckpoint, return errors.Trace(err) } r := rs[0] + //nolint: errcheck defer r.Close() req := r.NewChunk(nil) err = r.Next(ctx, req) @@ -247,7 +248,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe for { err = r.Next(ctx, req) if err != nil { - r.Close() + _ = r.Close() return err } if req.NumRows() == 0 { @@ -262,7 +263,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe } } } - r.Close() + _ = r.Close() // 2. Populate the chunks. sql = fmt.Sprintf(ReadChunkTemplate, g.schema, CheckpointTableNameChunk) @@ -277,7 +278,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe for { err = r.Next(ctx, req) if err != nil { - r.Close() + _ = r.Close() return err } if req.NumRows() == 0 { @@ -306,13 +307,13 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe value.FileMeta.Path = value.Key.Path value.Checksum = verify.MakeKVChecksum(kvcBytes, kvcKVs, kvcChecksum) if err := json.Unmarshal(colPerm, &value.ColumnPermutation); err != nil { - r.Close() + _ = r.Close() return errors.Trace(err) } cp.Engines[engineID].Chunks = append(cp.Engines[engineID].Chunks, value) } } - r.Close() + _ = r.Close() // 3. Fill in the remaining table info sql = fmt.Sprintf(ReadTableRemainTemplate, g.schema, CheckpointTableNameTable) @@ -322,6 +323,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe return errors.Trace(err) } r = rs[0] + //nolint: errcheck defer r.Close() req = r.NewChunk(nil) err = r.Next(ctx, req) @@ -713,7 +715,7 @@ func (g GlueCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tableName for { err = r.Next(ctx, req) if err != nil { - r.Close() + _ = r.Close() return err } if req.NumRows() == 0 { @@ -728,7 +730,7 @@ func (g GlueCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tableName targetTables = append(targetTables, dtc) } } - r.Close() + _ = r.Close() if _, e := s.Execute(c, deleteChunkQuery); e != nil { return errors.Trace(e) @@ -791,7 +793,7 @@ func drainFirstRecordSet(ctx context.Context, rss []sqlexec.RecordSet) ([]chunk. 
for { err := rs.Next(ctx, req) if err != nil || req.NumRows() == 0 { - rs.Close() + _ = rs.Close() return rows, err } iter := chunk.NewIterator4Chunk(req) diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index 40b7160ed5e30..67a26fb3ab411 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -21,9 +21,11 @@ import ( "encoding/json" "fmt" "io" + "net" "net/http" "net/url" "os" + "strconv" "strings" "syscall" "time" @@ -57,8 +59,9 @@ type MySQLConnectParam struct { } func (param *MySQLConnectParam) ToDSN() string { - dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8mb4&sql_mode='%s'&maxAllowedPacket=%d&tls=%s", - param.User, param.Password, param.Host, param.Port, + hostPort := net.JoinHostPort(param.Host, strconv.Itoa(param.Port)) + dsn := fmt.Sprintf("%s:%s@tcp(%s)/?charset=utf8mb4&sql_mode='%s'&maxAllowedPacket=%d&tls=%s", + param.User, param.Password, hostPort, param.SQLMode, param.MaxAllowedPacket, param.TLS) for k, v := range param.Vars { diff --git a/br/pkg/lightning/common/util_test.go b/br/pkg/lightning/common/util_test.go index cb13a10db9d5d..c7c95b44f69bf 100644 --- a/br/pkg/lightning/common/util_test.go +++ b/br/pkg/lightning/common/util_test.go @@ -99,6 +99,9 @@ func TestToDSN(t *testing.T) { }, } require.Equal(t, "root:123456@tcp(127.0.0.1:4000)/?charset=utf8mb4&sql_mode='strict'&maxAllowedPacket=1234&tls=cluster&tidb_distsql_scan_concurrency='1'", param.ToDSN()) + + param.Host = "::1" + require.Equal(t, "root:123456@tcp([::1]:4000)/?charset=utf8mb4&sql_mode='strict'&maxAllowedPacket=1234&tls=cluster&tidb_distsql_scan_concurrency='1'", param.ToDSN()) } type mockDriver struct { diff --git a/br/pkg/lightning/config/bytesize_test.go b/br/pkg/lightning/config/bytesize_test.go index 56637486275a4..0c05d9aa7a9a7 100644 --- a/br/pkg/lightning/config/bytesize_test.go +++ b/br/pkg/lightning/config/bytesize_test.go @@ -89,11 +89,11 @@ func TestByteSizeTOMLDecode(t *testing.T) { }, { input: "x = ['100000']", - err: "toml: cannot load TOML value.*", + err: "toml: incompatible types:.*", }, { input: "x = { size = '100000' }", - err: "toml: cannot load TOML value.*", + err: "toml: incompatible types:.*", }, } diff --git a/br/pkg/lightning/config/config.go b/br/pkg/lightning/config/config.go index b0ffe32fa3cd5..0066895568550 100644 --- a/br/pkg/lightning/config/config.go +++ b/br/pkg/lightning/config/config.go @@ -1156,7 +1156,7 @@ func (cfg *Config) CheckAndAdjustSecurity() error { return common.ErrInvalidConfig.GenWithStack("cannot set `tidb.tls` to 'cluster' without a [security] section") } case "false", "skip-verify", "preferred": - break + return nil default: return common.ErrInvalidConfig.GenWithStack("unsupported `tidb.tls` config %s", cfg.TiDB.TLS) } diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go index 555cbed109f1d..845b325e7fcfd 100644 --- a/br/pkg/lightning/config/config_test.go +++ b/br/pkg/lightning/config/config_test.go @@ -517,7 +517,7 @@ func TestInvalidTOML(t *testing.T) { delimiter = '\' backslash-escape = true `)) - require.EqualError(t, err, "Near line 2 (last key parsed ''): expected '.' or '=', but got '[' instead") + require.EqualError(t, err, "toml: line 2: expected '.' 
or '=', but got '[' instead") } func TestTOMLUnusedKeys(t *testing.T) { @@ -674,7 +674,7 @@ func TestLoadFromInvalidConfig(t *testing.T) { ConfigFileContent: []byte("invalid toml"), }) require.Error(t, err) - require.Regexp(t, "Near line 1.*", err.Error()) + require.Regexp(t, "line 1.*", err.Error()) } func TestTomlPostRestore(t *testing.T) { diff --git a/br/pkg/lightning/errormanager/errormanager.go b/br/pkg/lightning/errormanager/errormanager.go index b900d19c3a431..43035716d729c 100644 --- a/br/pkg/lightning/errormanager/errormanager.go +++ b/br/pkg/lightning/errormanager/errormanager.go @@ -25,15 +25,14 @@ import ( "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" "github.com/pingcap/errors" - "go.uber.org/multierr" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/br/pkg/utils" + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" ) const ( diff --git a/br/pkg/lightning/errormanager/errormanager_test.go b/br/pkg/lightning/errormanager/errormanager_test.go index 38f81b51f0299..88808e35628b8 100644 --- a/br/pkg/lightning/errormanager/errormanager_test.go +++ b/br/pkg/lightning/errormanager/errormanager_test.go @@ -25,12 +25,11 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" + "github.com/pingcap/tidb/br/pkg/utils" "github.com/stretchr/testify/require" "go.uber.org/atomic" - - "github.com/pingcap/tidb/br/pkg/lightning/config" - "github.com/pingcap/tidb/br/pkg/utils" ) func TestInit(t *testing.T) { diff --git a/br/pkg/lightning/glue/glue.go b/br/pkg/lightning/glue/glue.go index 9cbe37a8f9230..f7f95eee888f8 100644 --- a/br/pkg/lightning/glue/glue.go +++ b/br/pkg/lightning/glue/glue.go @@ -59,7 +59,7 @@ type sqlConnSession struct { } func (session *sqlConnSession) Close() { - session.conn.Close() + _ = session.conn.Close() } func (session *sqlConnSession) Execute(ctx context.Context, sql string) ([]sqlexec.RecordSet, error) { diff --git a/br/pkg/lightning/lightning.go b/br/pkg/lightning/lightning.go index 6e0f63f4df463..0b48b5b4e3e81 100644 --- a/br/pkg/lightning/lightning.go +++ b/br/pkg/lightning/lightning.go @@ -28,7 +28,6 @@ import ( "net/http" "net/http/pprof" "os" - "sort" "strconv" "strings" "sync" @@ -59,6 +58,7 @@ import ( "github.com/shurcooL/httpgzip" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" ) type Lightning struct { @@ -870,8 +870,8 @@ func checkSystemRequirement(cfg *config.Config, dbsMeta []*mydump.MDDatabaseMeta tableTotalSizes = append(tableTotalSizes, tb.TotalSize) } } - sort.Slice(tableTotalSizes, func(i, j int) bool { - return tableTotalSizes[i] > tableTotalSizes[j] + slices.SortFunc(tableTotalSizes, func(i, j int64) bool { + return i > j }) topNTotalSize := int64(0) for i := 0; i < len(tableTotalSizes) && i < cfg.App.TableConcurrency; i++ { @@ -912,6 +912,7 @@ func CheckpointRemove(ctx context.Context, cfg *config.Config, tableName string) if err != nil { return errors.Trace(err) } + //nolint: errcheck defer cpdb.Close() // try to remove the metadata first. 
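A minimal sketch of the sort.Slice to slices.SortFunc migration that recurs across these lightning files, assuming the bool "less" form of golang.org/x/exp/slices used in this diff (later releases of that package changed SortFunc to take an int-returning cmp function); the typed comparator receives the elements directly, so call sites drop the index bookkeeping and the "sort" import. The slice values below are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	tableTotalSizes := []int64{100, 300, 200}

	// Before the migration:
	//   sort.Slice(tableTotalSizes, func(i, j int) bool {
	//       return tableTotalSizes[i] > tableTotalSizes[j]
	//   })

	// After: compare the elements themselves, largest first.
	slices.SortFunc(tableTotalSizes, func(i, j int64) bool {
		return i > j
	})

	fmt.Println(tableTotalSizes) // [300 200 100]
}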
diff --git a/br/pkg/lightning/mydump/BUILD.bazel b/br/pkg/lightning/mydump/BUILD.bazel index 24d3545571ac2..1736f87af679f 100644 --- a/br/pkg/lightning/mydump/BUILD.bazel +++ b/br/pkg/lightning/mydump/BUILD.bazel @@ -31,7 +31,6 @@ go_library( "//util/slice", "//util/table-filter", "@com_github_pingcap_errors//:errors", - "@com_github_pingcap_failpoint//:failpoint", "@com_github_xitongsys_parquet_go//parquet", "@com_github_xitongsys_parquet_go//reader", "@com_github_xitongsys_parquet_go//source", diff --git a/br/pkg/lightning/mydump/charset_convertor.go b/br/pkg/lightning/mydump/charset_convertor.go index 53a77816bb5e3..fa5f1ee5ef540 100644 --- a/br/pkg/lightning/mydump/charset_convertor.go +++ b/br/pkg/lightning/mydump/charset_convertor.go @@ -19,10 +19,9 @@ import ( "unicode/utf8" "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/lightning/config" "golang.org/x/text/encoding" "golang.org/x/text/encoding/simplifiedchinese" - - "github.com/pingcap/tidb/br/pkg/lightning/config" ) // CharsetConvertor is used to convert a character set to utf8mb4 encoding. diff --git a/br/pkg/lightning/mydump/loader.go b/br/pkg/lightning/mydump/loader.go index 30f4f14c1464c..40091c61b2d03 100644 --- a/br/pkg/lightning/mydump/loader.go +++ b/br/pkg/lightning/mydump/loader.go @@ -39,21 +39,19 @@ type MDDatabaseMeta struct { } func (m *MDDatabaseMeta) GetSchema(ctx context.Context, store storage.ExternalStorage) string { - schema, err := ExportStatement(ctx, store, m.SchemaFile, m.charSet) - if err != nil { - log.FromContext(ctx).Warn("failed to extract table schema", - zap.String("Path", m.SchemaFile.FileMeta.Path), - log.ShortError(err), - ) - schema = nil - } - schemaStr := strings.TrimSpace(string(schema)) - // set default if schema sql is empty - if len(schemaStr) == 0 { - schemaStr = "CREATE DATABASE IF NOT EXISTS " + common.EscapeIdentifier(m.Name) + if m.SchemaFile.FileMeta.Path != "" { + schema, err := ExportStatement(ctx, store, m.SchemaFile, m.charSet) + if err != nil { + log.FromContext(ctx).Warn("failed to extract table schema", + zap.String("Path", m.SchemaFile.FileMeta.Path), + log.ShortError(err), + ) + } else if schemaStr := strings.TrimSpace(string(schema)); schemaStr != "" { + return schemaStr + } } - - return schemaStr + // set default if schema sql is empty or failed to extract. 
+ return "CREATE DATABASE IF NOT EXISTS " + common.EscapeIdentifier(m.Name) } type MDTableMeta struct { diff --git a/br/pkg/lightning/mydump/loader_test.go b/br/pkg/lightning/mydump/loader_test.go index 0f7079a1f95df..e256a685f20ee 100644 --- a/br/pkg/lightning/mydump/loader_test.go +++ b/br/pkg/lightning/mydump/loader_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/pingcap/tidb/br/pkg/lightning/config" + "github.com/pingcap/tidb/br/pkg/lightning/log" md "github.com/pingcap/tidb/br/pkg/lightning/mydump" "github.com/pingcap/tidb/br/pkg/storage" filter "github.com/pingcap/tidb/util/table-filter" @@ -181,13 +182,16 @@ func TestTableInfoNotFound(t *testing.T) { loader, err := md.NewMyDumpLoader(ctx, s.cfg) require.NoError(t, err) for _, dbMeta := range loader.GetDatabases() { - dbSQL := dbMeta.GetSchema(ctx, store) + logger, buffer := log.MakeTestLogger() + logCtx := log.NewContext(ctx, logger) + dbSQL := dbMeta.GetSchema(logCtx, store) require.Equal(t, "CREATE DATABASE IF NOT EXISTS `db`", dbSQL) for _, tblMeta := range dbMeta.Tables { - sql, err := tblMeta.GetSchema(ctx, store) + sql, err := tblMeta.GetSchema(logCtx, store) require.Equal(t, "", sql) require.NoError(t, err) } + require.NotContains(t, buffer.Stripped(), "failed to extract table schema") } } diff --git a/br/pkg/lightning/mydump/region.go b/br/pkg/lightning/mydump/region.go index 04cc75e5567ae..b4f2537fb2507 100644 --- a/br/pkg/lightning/mydump/region.go +++ b/br/pkg/lightning/mydump/region.go @@ -22,7 +22,6 @@ import ( "time" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/worker" @@ -272,12 +271,6 @@ func makeSourceFileRegion( if !isCsvFile { divisor += 2 } - sizePerRow, err := GetSampledAvgRowSize(&fi, cfg, ioWorkers, store) - if err == nil && sizePerRow != 0 { - log.FromContext(ctx).Warn("fail to sample file", zap.String("path", fi.FileMeta.Path), zap.Error(err)) - divisor = sizePerRow - } - log.FromContext(ctx).Debug("avg row size", zap.String("path", fi.FileMeta.Path), zap.Int64("size per row", sizePerRow)) // If a csv file is overlarge, we need to split it into multiple regions. // Note: We can only split a csv file whose format is strict. 
// We increase the check threshold by 1/10 of the `max-region-size` because the source file size dumped by tools @@ -299,10 +292,6 @@ func makeSourceFileRegion( RowIDMax: fi.FileMeta.FileSize / divisor, }, } - failpoint.Inject("MockInaccurateRowID", func() { - // only allocates 5 rows but contains 10 rows - tableRegion.Chunk.RowIDMax = 5 - }) if tableRegion.Size() > tableRegionSizeWarningThreshold { log.FromContext(ctx).Warn( @@ -313,55 +302,6 @@ func makeSourceFileRegion( return []*TableRegion{tableRegion}, []float64{float64(fi.FileMeta.FileSize)}, nil } -func GetSampledAvgRowSize( - fileInfo *FileInfo, - cfg *config.Config, - ioWorkers *worker.Pool, - store storage.ExternalStorage, -) (int64, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - reader, err := store.Open(ctx, fileInfo.FileMeta.Path) - if err != nil { - return 0, err - } - var parser Parser - switch fileInfo.FileMeta.Type { - case SourceTypeCSV: - hasHeader := cfg.Mydumper.CSV.Header - charsetConvertor, err := NewCharsetConvertor(cfg.Mydumper.DataCharacterSet, cfg.Mydumper.DataInvalidCharReplace) - if err != nil { - return 0, err - } - parser, err = NewCSVParser(ctx, &cfg.Mydumper.CSV, reader, int64(cfg.Mydumper.ReadBlockSize), ioWorkers, hasHeader, charsetConvertor) - if err != nil { - return 0, err - } - case SourceTypeSQL: - parser = NewChunkParser(ctx, cfg.TiDB.SQLMode, reader, int64(cfg.Mydumper.ReadBlockSize), ioWorkers) - default: - return 0, errors.Errorf("source file %s is none of csv, sql, or parquet file", fileInfo.FileMeta.Path) - } - totalBytes := 0 - totalRows := 0 - defaultSampleRows := 10 // todo: may be configurable - for i := 0; i < defaultSampleRows; i++ { - err = parser.ReadRow() - if err != nil && errors.Cause(err) == io.EOF { - break - } else if err != nil { - return 0, err - } - totalBytes += parser.LastRow().Length - totalRows++ - } - if totalRows > 0 { - return int64(totalBytes) / int64(totalRows), nil - } else { - return 0, nil - } -} - // because parquet files can't seek efficiently, there is no benefit in split. // parquet file are column orient, so the offset is read line number func makeParquetFileRegion( @@ -441,9 +381,6 @@ func SplitLargeFile( } for { curRowsCnt := (endOffset - startOffset) / divisor - if curRowsCnt == 0 && endOffset != startOffset { - curRowsCnt = 1 - } rowIDMax := prevRowIdxMax + curRowsCnt if endOffset != dataFile.FileMeta.FileSize { r, err := store.Open(ctx, dataFile.FileMeta.Path) diff --git a/br/pkg/lightning/mydump/region_test.go b/br/pkg/lightning/mydump/region_test.go index 37ba4e4028e39..a1dbb9f290a69 100644 --- a/br/pkg/lightning/mydump/region_test.go +++ b/br/pkg/lightning/mydump/region_test.go @@ -40,8 +40,6 @@ import ( */ func TestTableRegion(t *testing.T) { cfg := newConfigWithSourceDir("./examples") - // specify ReadBlockSize because we need to sample files - cfg.Mydumper.ReadBlockSize = config.ReadBlockSize loader, _ := NewMyDumpLoader(context.Background(), cfg) dbMeta := loader.GetDatabases()[0] @@ -384,90 +382,3 @@ func TestSplitLargeFileOnlyOneChunk(t *testing.T) { require.Equal(t, columns, regions[i].Chunk.Columns) } } - -func TestSampleAndGetAvgRowSize(t *testing.T) { - // It's more difficult to estimate sizes of SQL files than csv files, - // because when reading the first row of them, parser may read other info (e.g. table name) - // so that make it hard to get good estimate, especially when files have few rows. - sqlFiles := []string{ - // 1. 
long table name, values: - // 1.1 short and even len - "INSERT INTO `test_db_mock_long.test_table_very_long_name` VALUES (1),(2);", - // 1.2 short and not even - "INSERT INTO `test_db_mock_long.test_table_very_long_name` VALUES (123452123,1234123125),(2,1);", - "INSERT INTO `test_db_mock_long.test_table_very_long_name` VALUES (2,1),(123452123,1234123125);", - // 1.3 long and even - "INSERT INTO `test_db_mock_long.test_table_very_long_name` VALUES (123452123,1234123125),(1234123125,12341231251);", - // 1.4 long but not even - "INSERT INTO `test_db_mock_long.test_table_very_long_name` VALUES ('abcdefghidgjla','lkjadsfasfdkjl'),('1111111','1');", - // 2. short table name, values: - // 2.1 short and even len - "INSERT INTO `a` VALUES (1),(2);", - // 2.2 short and not even - "INSERT INTO `a` VALUES (123452123,1234123125),(2,1);", - "INSERT INTO `a` VALUES (2,1),(123452123,1234123125);", - // 2.3 long and even - "INSERT INTO `a` VALUES (123452123,1234123125),(1234123125,12341231251);", - // 2.4 long but not even - "INSERT INTO `a` VALUES ('abcdefghidgjla','lkjadsfasfdkjl'),('1111111','1');", - } - - csvFiles := []string{ - // even and short - "a,b,c\r\n1,2,3\r\n4,5,6\r\n", - // not even but short - "a,b,c\r\n1112,1234,1923\r\n1,2,3", - // even and long - "a,b,c\r\n14712312,123122,1231233\r\n4456364,34525,423426\r\n", - // not even but long - "a,b,c\r\nsadlk;fja;lskdfj;alksdfj,sdlk;fjaksld;fja;l,qpoiwuepqou\r\n0,0,0\r\n", - } - testFunc := func(files []string, fileType SourceType) { - for _, file := range files { - dir := t.TempDir() - - var fileName string - if fileType == SourceTypeCSV { - fileName = "test.csv" - } else { - fileName = "test.sql" - } - filePath := filepath.Join(dir, fileName) - - content := []byte(file) - err := os.WriteFile(filePath, content, 0o644) - require.Nil(t, err) - dataFileInfo, err := os.Stat(filePath) - require.Nil(t, err) - fileSize := dataFileInfo.Size() - - cfg := newConfigWithSourceDir(dir) - loader, _ := NewMyDumpLoader(context.Background(), cfg) - ioWorkers := worker.NewPool(context.Background(), 1, "io") - - // specify ReadBlockSize because we need to sample files - cfg.Mydumper.ReadBlockSize = config.ReadBlockSize - fileInfo := FileInfo{ - FileMeta: SourceFileMeta{ - Path: fileName, - Type: fileType, - FileSize: fileSize, - }, - } - cfg.Mydumper.CSV = config.CSVConfig{ - Separator: ",", - Delimiter: `"`, - Header: true, - NotNull: false, - Null: `\N`, - BackslashEscape: true, - TrimLastSep: false, - } - size, err := GetSampledAvgRowSize(&fileInfo, cfg, ioWorkers, loader.GetStore()) - require.Nil(t, err) - require.GreaterOrEqual(t, fileSize/size, int64(2)) - } - } - testFunc(sqlFiles, SourceTypeSQL) - testFunc(csvFiles, SourceTypeCSV) -} diff --git a/br/pkg/lightning/mydump/router.go b/br/pkg/lightning/mydump/router.go index c3a6ff3aae161..98ecd4c63e08a 100644 --- a/br/pkg/lightning/mydump/router.go +++ b/br/pkg/lightning/mydump/router.go @@ -7,12 +7,11 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/lightning/config" + "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/util/filter" "github.com/pingcap/tidb/util/slice" "go.uber.org/zap" - - "github.com/pingcap/tidb/br/pkg/lightning/config" - "github.com/pingcap/tidb/br/pkg/lightning/log" ) type SourceType int diff --git a/br/pkg/lightning/restore/BUILD.bazel b/br/pkg/lightning/restore/BUILD.bazel index b8c674deee501..80befee3774fd 100644 --- a/br/pkg/lightning/restore/BUILD.bazel +++ b/br/pkg/lightning/restore/BUILD.bazel @@ -66,6 +66,7 @@ go_library( 
"@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_pd_client//:client", "@org_golang_x_exp//maps", + "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", "@org_uber_go_atomic//:atomic", "@org_uber_go_multierr//:multierr", diff --git a/br/pkg/lightning/restore/check_info.go b/br/pkg/lightning/restore/check_info.go index 442fae5a3e18b..2be105a157fac 100644 --- a/br/pkg/lightning/restore/check_info.go +++ b/br/pkg/lightning/restore/check_info.go @@ -22,7 +22,6 @@ import ( "io" "path/filepath" "reflect" - "sort" "strconv" "strings" "sync" @@ -50,6 +49,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/mathutil" "go.uber.org/zap" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" ) @@ -291,8 +291,8 @@ func (rc *Controller) checkRegionDistribution(ctx context.Context) error { if len(stores) <= 1 { return nil } - sort.Slice(stores, func(i, j int) bool { - return stores[i].Status.RegionCount < stores[j].Status.RegionCount + slices.SortFunc(stores, func(i, j *pdtypes.StoreInfo) bool { + return i.Status.RegionCount < j.Status.RegionCount }) minStore := stores[0] maxStore := stores[len(stores)-1] @@ -629,6 +629,7 @@ func (rc *Controller) readFirstRow(ctx context.Context, dataFileMeta mydump.Sour default: panic(fmt.Sprintf("unknown file type '%s'", dataFileMeta.Type)) } + //nolint: errcheck defer parser.Close() err = parser.ReadRow() @@ -995,6 +996,7 @@ func (rc *Controller) sampleDataFromTable( default: panic(fmt.Sprintf("file '%s' with unknown source type '%s'", sampleFile.Path, sampleFile.Type.String())) } + //nolint: errcheck defer parser.Close() logTask := log.FromContext(ctx).With(zap.String("table", tableMeta.Name)).Begin(zap.InfoLevel, "sample file") igCols, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(dbName, tableMeta.Name, rc.cfg.Mydumper.CaseSensitive) @@ -1154,7 +1156,7 @@ loop: if len(tableNames) > 0 { // sort the failed names - sort.Strings(tableNames) + slices.Sort(tableNames) msg := fmt.Sprintf("table(s) [%s] are not empty", strings.Join(tableNames, ", ")) rc.checkTemplate.Collect(Critical, false, msg) } diff --git a/br/pkg/lightning/restore/chunk_restore_test.go b/br/pkg/lightning/restore/chunk_restore_test.go index 59d083d85561c..2a9a42434c77b 100644 --- a/br/pkg/lightning/restore/chunk_restore_test.go +++ b/br/pkg/lightning/restore/chunk_restore_test.go @@ -73,7 +73,7 @@ func (s *chunkRestoreSuite) SetupTest() { } var err error - s.cr, err = newChunkRestore(context.Background(), 1, s.cfg, &chunk, w, s.store, nil, nil) + s.cr, err = newChunkRestore(context.Background(), 1, s.cfg, &chunk, w, s.store, nil) require.NoError(s.T(), err) } diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go index b94bde8208be6..4391f29b8c146 100644 --- a/br/pkg/lightning/restore/meta_manager.go +++ b/br/pkg/lightning/restore/meta_manager.go @@ -78,11 +78,6 @@ func (b *dbMetaMgrBuilder) TableMetaMgr(tr *TableRestore) tableMetaMgr { type tableMetaMgr interface { InitTableMeta(ctx context.Context) error AllocTableRowIDs(ctx context.Context, rawRowIDMax int64) (*verify.KVChecksum, int64, error) - // ReallocTableRowIDs reallocates the row IDs of a table. - // It returns new rowIDBase and maxRowID or any error it encounters. - // Note that noopTableMetaMgr has a noop implementation of this function. - // If maxRowID is 0, caller should maintain rowIDBase and maxRowID itself. 
- ReallocTableRowIDs(ctx context.Context, newRowIDCount int64) (int64, int64, error) UpdateTableStatus(ctx context.Context, status metaStatus) error UpdateTableBaseChecksum(ctx context.Context, checksum *verify.KVChecksum) error CheckAndUpdateLocalChecksum(ctx context.Context, checksum *verify.KVChecksum, hasLocalDupes bool) ( @@ -165,56 +160,12 @@ func parseMetaStatus(s string) (metaStatus, error) { } } -func (m *dbTableMetaMgr) ReallocTableRowIDs(ctx context.Context, newRowIDCount int64) (int64, int64, error) { - conn, err := m.session.Conn(ctx) - if err != nil { - return 0, 0, errors.Trace(err) - } - defer conn.Close() - exec := &common.SQLWithRetry{ - DB: m.session, - Logger: m.tr.logger, - } - err = exec.Exec(ctx, "enable pessimistic transaction", "SET SESSION tidb_txn_mode = 'pessimistic';") - if err != nil { - return 0, 0, errors.Annotate(err, "enable pessimistic transaction failed") - } - var ( - maxRowIDMax int64 - newRowIDMax int64 - ) - err = exec.Transact(ctx, "realloc table rowID", func(ctx context.Context, tx *sql.Tx) error { - row := tx.QueryRowContext( - ctx, - fmt.Sprintf("SELECT MAX(row_id_max) from %s WHERE table_id = ? FOR UPDATE", m.tableName), - m.tr.tableInfo.ID, - ) - if row.Err() != nil { - return errors.Trace(err) - } - if err := row.Scan(&maxRowIDMax); err != nil { - return errors.Trace(err) - } - newRowIDMax = maxRowIDMax + newRowIDCount - // nolint:gosec - query := fmt.Sprintf("UPDATE %s SET row_id_max = ? WHERE table_id = ? AND task_id = ?", m.tableName) - if _, err := tx.ExecContext(ctx, query, newRowIDMax, m.tr.tableInfo.ID, m.taskID); err != nil { - return err - } - return nil - }) - if err != nil { - return 0, 0, errors.Trace(err) - } - // newRowIDBase = maxRowIDMax + 1 - return maxRowIDMax + 1, newRowIDMax, nil -} - func (m *dbTableMetaMgr) AllocTableRowIDs(ctx context.Context, rawRowIDMax int64) (*verify.KVChecksum, int64, error) { conn, err := m.session.Conn(ctx) if err != nil { return nil, 0, errors.Trace(err) } + //nolint: errcheck defer conn.Close() exec := &common.SQLWithRetry{ DB: m.session, @@ -417,6 +368,7 @@ func (m *dbTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checks if err != nil { return false, false, nil, errors.Trace(err) } + //nolint: errcheck defer conn.Close() exec := &common.SQLWithRetry{ DB: m.session, @@ -688,6 +640,7 @@ func (m *dbTaskMetaMgr) CheckTasksExclusively(ctx context.Context, action func(t if err != nil { return errors.Trace(err) } + //nolint: errcheck defer conn.Close() exec := &common.SQLWithRetry{ DB: m.session, @@ -746,6 +699,7 @@ func (m *dbTaskMetaMgr) CheckAndPausePdSchedulers(ctx context.Context) (pdutil.U cancel() return nil, errors.Trace(err) } + //nolint: errcheck defer conn.Close() exec := &common.SQLWithRetry{ DB: m.session, @@ -881,6 +835,7 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool if err != nil { return false, false, errors.Trace(err) } + //nolint: errcheck defer conn.Close() exec := &common.SQLWithRetry{ DB: m.session, @@ -1097,12 +1052,6 @@ func (m noopTableMetaMgr) InitTableMeta(ctx context.Context) error { return nil } -func (m noopTableMetaMgr) ReallocTableRowIDs(ctx context.Context, _ int64) (int64, int64, error) { - // we don't need to reconcile rowIDs across all the instances - // barring using parallel import - return 0, 0, nil -} - func (m noopTableMetaMgr) AllocTableRowIDs(ctx context.Context, rawRowIDMax int64) (*verify.KVChecksum, int64, error) { return nil, 0, nil } diff --git a/br/pkg/lightning/restore/meta_manager_test.go 
b/br/pkg/lightning/restore/meta_manager_test.go index 23102b56f07a6..8480bf077d6de 100644 --- a/br/pkg/lightning/restore/meta_manager_test.go +++ b/br/pkg/lightning/restore/meta_manager_test.go @@ -384,35 +384,3 @@ func TestSingleTaskMetaMgr(t *testing.T) { }) require.NoError(t, err) } - -func TestReallocTableRowIDs(t *testing.T) { - s, clean := newMetaMgrSuite(t) - defer clean() - - ctx := context.WithValue(context.Background(), &checksumManagerKey, s.checksumMgr) - - rows := [][]driver.Value{ - {int64(1), int64(998), int64(1008), uint64(0), uint64(0), uint64(0), metaStatusRowIDAllocated.String()}, - } - checksum := verification.MakeKVChecksum(2, 1, 3) - s.prepareMock(rows, nil, nil, &checksum, nil) - - ck, rowIDBase, err := s.mgr.AllocTableRowIDs(ctx, 10) - require.NoError(t, err) - require.Equal(t, int64(998), rowIDBase) - require.Equal(t, &checksum, ck) - require.Equal(t, 1, s.checksumMgr.callCnt) - s.mockDB.ExpectExec("SET SESSION tidb_txn_mode = 'pessimistic';"). - WillReturnResult(sqlmock.NewResult(int64(0), int64(0))) - - s.mockDB.ExpectBegin() - s.mockDB.ExpectQuery("\\QSELECT MAX(row_id_max) from `test`.`table_meta` WHERE table_id = ? FOR UPDATE\\E").WithArgs(int64(1)). - WillReturnRows(sqlmock.NewRows([]string{"row_id_max"}).AddRow(1008)) - s.mockDB.ExpectExec("\\QUPDATE `test`.`table_meta` SET row_id_max = ? WHERE table_id = ? AND task_id = ?\\E").WithArgs(int64(1018), int64(1), int64(1)). - WillReturnResult(sqlmock.NewResult(int64(0), int64(1))) - s.mockDB.ExpectCommit() - newBase, newMax, err := s.mgr.ReallocTableRowIDs(context.Background(), 10) - require.Nil(t, err) - require.Equal(t, int64(1009), newBase) - require.Equal(t, int64(1018), newMax) -} diff --git a/br/pkg/lightning/restore/restore.go b/br/pkg/lightning/restore/restore.go index 6246c27ef411b..9cff45ce84de9 100644 --- a/br/pkg/lightning/restore/restore.go +++ b/br/pkg/lightning/restore/restore.go @@ -916,7 +916,6 @@ func (rc *Controller) saveStatusCheckpoint(ctx context.Context, tableName string switch { case err == nil: - break case utils.MessageIsRetryableStorageError(err.Error()), common.IsContextCanceledError(err): // recoverable error, should not be recorded in checkpoint // which will prevent lightning from automatically recovering @@ -1611,9 +1610,6 @@ func (tr *TableRestore) restoreTable( rowIDMax = engine.Chunks[len(engine.Chunks)-1].Chunk.RowIDMax } } - if rowIDMax > tr.curMaxRowID { - tr.curMaxRowID = rowIDMax - } db, _ := rc.tidbGlue.GetDB() versionStr, err := version.FetchVersion(ctx, db) if err != nil { @@ -2056,16 +2052,9 @@ func (rc *Controller) DataCheck(ctx context.Context) error { } type chunkRestore struct { - parser mydump.Parser - index int - chunk *checkpoints.ChunkCheckpoint - originalRowIDMax int64 - curRowIDBase int64 - curRowIDMax int64 - tableRestore *TableRestore - - rowCount int - curAccmRowSize uint64 // has a maximum of 18446744.07370955 TB + parser mydump.Parser + index int + chunk *checkpoints.ChunkCheckpoint } func newChunkRestore( @@ -2076,7 +2065,6 @@ func newChunkRestore( ioWorkers *worker.Pool, store storage.ExternalStorage, tableInfo *checkpoints.TidbTableInfo, - tableRestore *TableRestore, ) (*chunkRestore, error) { blockBufSize := int64(cfg.Mydumper.ReadBlockSize) @@ -2123,16 +2111,14 @@ func newChunkRestore( } return &chunkRestore{ - parser: parser, - index: index, - chunk: chunk, - originalRowIDMax: chunk.Chunk.RowIDMax, - tableRestore: tableRestore, + parser: parser, + index: index, + chunk: chunk, }, nil } func (cr *chunkRestore) close() { - cr.parser.Close() + _ = 
cr.parser.Close() } func getColumnNames(tableInfo *model.TableInfo, permutation []int) []string { @@ -2180,52 +2166,13 @@ type deliverResult struct { err error } -func (cr *chunkRestore) adjustRowID(rowID int64, rc *Controller) (int64, error) { - if rowID <= cr.originalRowIDMax { - // no need to ajust - return rowID, nil - } - // need to adjust rowID - // rowID should be within [curRowIDBase, curRowIDMax] - if cr.curRowIDBase == 0 || cr.curRowIDBase > cr.curRowIDMax { - logger := cr.tableRestore.logger.With( - zap.String("tableName", cr.tableRestore.tableName), - zap.Int("fileIndex", cr.index), - zap.Stringer("path", &cr.chunk.Key), - zap.String("task", "re-allocate rowID"), - ) - logger.Info("start re-allocating") - // 1. curRowIDBase == 0 -> no previous re-allocation - // 2. curRowIDBase > curRowIDMax -> run out of allocated IDs - pos, _ := cr.parser.Pos() - leftFileSize := cr.chunk.Chunk.EndOffset - pos - avgRowSize := cr.curAccmRowSize / uint64(cr.rowCount) - newRowIDCount := leftFileSize/int64(avgRowSize) + 1 // plus the current row - newBase, newMax, err := cr.tableRestore.allocateRowIDs(newRowIDCount, rc) - if err != nil { - logger.Error("fail to re-allocate rowIDs", zap.Error(err)) - return 0, err - } - cr.curRowIDBase = newBase - cr.curRowIDMax = newMax - } - rowID = cr.curRowIDBase - cr.curRowIDBase++ - return rowID, nil -} - -func (cr *chunkRestore) updateRowStats(rowSize int) { - cr.curAccmRowSize += uint64(rowSize) - cr.rowCount++ -} - //nolint:nakedret // TODO: refactor func (cr *chunkRestore) deliverLoop( ctx context.Context, kvsCh <-chan []deliveredKVs, t *TableRestore, engineID int32, - dataWriter, indexWriter *backend.LocalEngineWriter, + dataEngine, indexEngine *backend.LocalEngineWriter, rc *Controller, ) (deliverTotalDur time.Duration, err error) { deliverLogger := t.logger.With( @@ -2249,6 +2196,7 @@ func (cr *chunkRestore) deliverLoop( startOffset := cr.chunk.Chunk.Offset currOffset := startOffset rowID := cr.chunk.Chunk.PrevRowIDMax + populate: for dataChecksum.SumSize()+indexChecksum.SumSize() < minDeliverBytes { select { @@ -2283,7 +2231,7 @@ func (cr *chunkRestore) deliverLoop( for !rc.diskQuotaLock.TryRLock() { // try to update chunk checkpoint, this can help save checkpoint after importing when disk-quota is triggered if !dataSynced { - dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataWriter, indexWriter) + dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataEngine, indexEngine) } time.Sleep(time.Millisecond) } @@ -2292,14 +2240,14 @@ func (cr *chunkRestore) deliverLoop( // Write KVs into the engine start := time.Now() - if err = dataWriter.WriteRows(ctx, columns, dataKVs); err != nil { + if err = dataEngine.WriteRows(ctx, columns, dataKVs); err != nil { if !common.IsContextCanceledError(err) { deliverLogger.Error("write to data engine failed", log.ShortError(err)) } return errors.Trace(err) } - if err = indexWriter.WriteRows(ctx, columns, indexKVs); err != nil { + if err = indexEngine.WriteRows(ctx, columns, indexKVs); err != nil { if !common.IsContextCanceledError(err) { deliverLogger.Error("write to index engine failed", log.ShortError(err)) } @@ -2341,7 +2289,7 @@ func (cr *chunkRestore) deliverLoop( if currOffset > lastOffset || dataChecksum.SumKVS() != 0 || indexChecksum.SumKVS() != 0 { // No need to save checkpoint if nothing was delivered. 
- dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataWriter, indexWriter) + dataSynced = cr.maybeSaveCheckpoint(rc, t, engineID, cr.chunk, dataEngine, indexEngine) } failpoint.Inject("SlowDownWriteRows", func() { deliverLogger.Warn("Slowed down write rows") @@ -2512,11 +2460,6 @@ func (cr *chunkRestore) encodeLoop( encodeDurStart := time.Now() lastRow := cr.parser.LastRow() // sql -> kv - if lastRow.RowID, err = cr.adjustRowID(lastRow.RowID, rc); err != nil { - return - } - cr.updateRowStats(lastRow.Length) - rowID = lastRow.RowID kvs, encodeErr := kvEncoder.Encode(logger, lastRow.Row, lastRow.RowID, cr.chunk.ColumnPermutation, cr.chunk.Key.Path, curOffset) encodeDur += time.Since(encodeDurStart) @@ -2579,7 +2522,7 @@ func (cr *chunkRestore) restore( ctx context.Context, t *TableRestore, engineID int32, - dataWriter, indexWriter *backend.LocalEngineWriter, + dataEngine, indexEngine *backend.LocalEngineWriter, rc *Controller, ) error { // Create the encoder. @@ -2600,7 +2543,7 @@ func (cr *chunkRestore) restore( go func() { defer close(deliverCompleteCh) - dur, err := cr.deliverLoop(ctx, kvsCh, t, engineID, dataWriter, indexWriter, rc) + dur, err := cr.deliverLoop(ctx, kvsCh, t, engineID, dataEngine, indexEngine, rc) select { case <-ctx.Done(): case deliverCompleteCh <- deliverResult{dur, err}: diff --git a/br/pkg/lightning/restore/restore_schema_test.go b/br/pkg/lightning/restore/restore_schema_test.go index 7bce917eb828c..d7a585f0c10a3 100644 --- a/br/pkg/lightning/restore/restore_schema_test.go +++ b/br/pkg/lightning/restore/restore_schema_test.go @@ -23,10 +23,6 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/golang/mock/gomock" "github.com/pingcap/errors" - filter "github.com/pingcap/tidb/util/table-filter" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" "github.com/pingcap/tidb/br/pkg/lightning/config" @@ -39,6 +35,9 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" tmock "github.com/pingcap/tidb/util/mock" + filter "github.com/pingcap/tidb/util/table-filter" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) type restoreSchemaSuite struct { diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go index 491a59fa1c33b..31a83238c77fe 100644 --- a/br/pkg/lightning/restore/table_restore.go +++ b/br/pkg/lightning/restore/table_restore.go @@ -16,7 +16,6 @@ package restore import ( "context" - "sort" "strings" "sync" "time" @@ -43,6 +42,7 @@ import ( "github.com/pingcap/tidb/util/mathutil" "go.uber.org/multierr" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type TableRestore struct { @@ -56,8 +56,6 @@ type TableRestore struct { logger log.Logger ignoreColumns map[string]struct{} - rowIDLock sync.Mutex - curMaxRowID int64 } func NewTableRestore( @@ -150,9 +148,6 @@ func (tr *TableRestore) RebaseChunkRowIDs(cp *checkpoints.TableCheckpoint, rowID for _, chunk := range engine.Chunks { chunk.Chunk.PrevRowIDMax += rowIDBase chunk.Chunk.RowIDMax += rowIDBase - if chunk.Chunk.RowIDMax > tr.curMaxRowID { - tr.curMaxRowID = chunk.Chunk.RowIDMax - } } } } @@ -290,7 +285,7 @@ func (tr *TableRestore) restoreEngines(pCtx context.Context, rc *Controller, cp for engineID, engine := range cp.Engines { allEngines = append(allEngines, engineCheckpoint{engineID: engineID, checkpoint: engine}) } - sort.Slice(allEngines, func(i, j int) bool { 
return allEngines[i].engineID < allEngines[j].engineID }) + slices.SortFunc(allEngines, func(i, j engineCheckpoint) bool { return i.engineID < j.engineID }) for _, ecp := range allEngines { engineID := ecp.engineID @@ -510,7 +505,7 @@ func (tr *TableRestore) restoreEngine( // 2. sql -> kvs // 3. load kvs data (into kv deliver server) // 4. flush kvs data (into tikv node) - cr, err := newChunkRestore(ctx, chunkIndex, rc.cfg, chunk, rc.ioWorkers, rc.store, tr.tableInfo, tr) + cr, err := newChunkRestore(ctx, chunkIndex, rc.cfg, chunk, rc.ioWorkers, rc.store, tr.tableInfo) if err != nil { setError(err) break @@ -1053,31 +1048,3 @@ func estimateCompactionThreshold(cp *checkpoints.TableCheckpoint, factor int64) return threshold } - -func (tr *TableRestore) allocateRowIDs(newRowCount int64, rc *Controller) (int64, int64, error) { - tr.rowIDLock.Lock() - defer tr.rowIDLock.Unlock() - metaMgr := rc.metaMgrBuilder.TableMetaMgr(tr) - // try to re-allocate from downstream - // if we are using parallel import, rowID should be reconciled globally. - // Otherwise, this function will simply return 0. - newRowIDBase, newRowIDMax, err := metaMgr.ReallocTableRowIDs(context.Background(), newRowCount) - if err != nil { - return 0, 0, err - } - // TODO: refinement: currently, when we're not using SSTMode + incremental, - // metadata of the table restore is not maintained globally. - // So we have to deviate this two disparate situations here and make - // code complexer. - var rowIDBase int64 - if newRowIDMax != 0 { - // re-alloc from downstream - rowIDBase = newRowIDBase - tr.curMaxRowID = newRowIDMax - } else { - // single import mode: re-allocate rowID from memory - rowIDBase = tr.curMaxRowID + 1 - tr.curMaxRowID += newRowCount - } - return rowIDBase, tr.curMaxRowID, nil -} diff --git a/br/pkg/lightning/restore/table_restore_test.go b/br/pkg/lightning/restore/table_restore_test.go index 87aa389c7167b..5a8799ad43002 100644 --- a/br/pkg/lightning/restore/table_restore_test.go +++ b/br/pkg/lightning/restore/table_restore_test.go @@ -198,7 +198,6 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Engines: make(map[int32]*checkpoints.EngineCheckpoint), } - s.cfg.Mydumper.CSV.Header = false rc := &Controller{cfg: s.cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: s.store} err := s.tr.populateChunks(context.Background(), rc, cp) require.NoError(s.T(), err) @@ -217,7 +216,7 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Offset: 0, EndOffset: 37, PrevRowIDMax: 0, - RowIDMax: 1, + RowIDMax: 7, // 37 bytes with 3 columns can store at most 7 rows. 
}, Timestamp: 1234567897, }, @@ -227,8 +226,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 37, - PrevRowIDMax: 1, - RowIDMax: 2, + PrevRowIDMax: 7, + RowIDMax: 14, }, Timestamp: 1234567897, }, @@ -238,8 +237,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 37, - PrevRowIDMax: 2, - RowIDMax: 3, + PrevRowIDMax: 14, + RowIDMax: 21, }, Timestamp: 1234567897, }, @@ -254,8 +253,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 37, - PrevRowIDMax: 3, - RowIDMax: 4, + PrevRowIDMax: 21, + RowIDMax: 28, }, Timestamp: 1234567897, }, @@ -265,8 +264,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 37, - PrevRowIDMax: 4, - RowIDMax: 5, + PrevRowIDMax: 28, + RowIDMax: 35, }, Timestamp: 1234567897, }, @@ -276,8 +275,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 37, - PrevRowIDMax: 5, - RowIDMax: 6, + PrevRowIDMax: 35, + RowIDMax: 42, }, Timestamp: 1234567897, }, @@ -292,8 +291,8 @@ func (s *tableRestoreSuite) TestPopulateChunks() { Chunk: mydump.Chunk{ Offset: 0, EndOffset: 14, - PrevRowIDMax: 6, - RowIDMax: 10, + PrevRowIDMax: 42, + RowIDMax: 46, }, Timestamp: 1234567897, }, @@ -473,7 +472,7 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Offset: 0, EndOffset: 14, PrevRowIDMax: 0, - RowIDMax: 4, // 14 bytes and 3 byte for each row + RowIDMax: 4, // 37 bytes with 3 columns can store at most 7 rows. }, Timestamp: 1234567897, }, @@ -484,7 +483,7 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Offset: 0, EndOffset: 10, PrevRowIDMax: 4, - RowIDMax: 9, // 10 bytes and 2 byte for each row + RowIDMax: 7, }, Timestamp: 1234567897, }, @@ -495,8 +494,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 6, EndOffset: 52, - PrevRowIDMax: 9, - RowIDMax: 13, + PrevRowIDMax: 7, + RowIDMax: 20, Columns: []string{"a", "b", "c"}, }, @@ -509,8 +508,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 52, EndOffset: 60, - PrevRowIDMax: 13, - RowIDMax: 14, + PrevRowIDMax: 20, + RowIDMax: 22, Columns: []string{"a", "b", "c"}, }, Timestamp: 1234567897, @@ -522,8 +521,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 6, EndOffset: 48, - PrevRowIDMax: 14, - RowIDMax: 17, + PrevRowIDMax: 22, + RowIDMax: 35, Columns: []string{"c", "a", "b"}, }, Timestamp: 1234567897, @@ -540,8 +539,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 48, EndOffset: 101, - PrevRowIDMax: 17, - RowIDMax: 20, + PrevRowIDMax: 35, + RowIDMax: 48, Columns: []string{"c", "a", "b"}, }, Timestamp: 1234567897, @@ -553,8 +552,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 101, EndOffset: 102, - PrevRowIDMax: 20, - RowIDMax: 21, + PrevRowIDMax: 48, + RowIDMax: 48, Columns: []string{"c", "a", "b"}, }, Timestamp: 1234567897, @@ -566,8 +565,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 4, EndOffset: 59, - PrevRowIDMax: 21, - RowIDMax: 23, + PrevRowIDMax: 48, + RowIDMax: 61, Columns: []string{"b", "c"}, }, Timestamp: 1234567897, @@ -584,8 +583,8 @@ func (s *tableRestoreSuite) TestPopulateChunksCSVHeader() { Chunk: mydump.Chunk{ Offset: 59, EndOffset: 60, - PrevRowIDMax: 23, - RowIDMax: 24, + PrevRowIDMax: 61, + RowIDMax: 61, 
Columns: []string{"b", "c"}, }, Timestamp: 1234567897, diff --git a/br/pkg/pdutil/pd_serial_test.go b/br/pkg/pdutil/pd_serial_test.go index 608830fe190fe..b3cd714bb53b9 100644 --- a/br/pkg/pdutil/pd_serial_test.go +++ b/br/pkg/pdutil/pd_serial_test.go @@ -13,6 +13,7 @@ import ( "net/http/httptest" "net/url" "strings" + "sync" "testing" "time" @@ -239,11 +240,21 @@ func TestPauseSchedulersByKeyRange(t *testing.T) { labelExpires := make(map[string]time.Time) + var ( + mu sync.Mutex + deleted bool + ) + httpSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if deleted { + return + } if r.Method == http.MethodDelete { ruleID := strings.TrimPrefix(r.URL.Path, "/"+regionLabelPrefix+"/") - print(ruleID) delete(labelExpires, ruleID) + deleted = true return } var labelRule LabelRule diff --git a/br/pkg/restore/BUILD.bazel b/br/pkg/restore/BUILD.bazel index 94dd07ccb1384..e18abc5e82b59 100644 --- a/br/pkg/restore/BUILD.bazel +++ b/br/pkg/restore/BUILD.bazel @@ -73,6 +73,7 @@ go_library( "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", + "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index f2cd2c49b9983..4ab1b04e49b38 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "math" - "sort" "strconv" "strings" "sync" @@ -52,6 +51,7 @@ import ( pd "github.com/tikv/pd/client" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -315,7 +315,7 @@ func (rc *Client) SetStorage(ctx context.Context, backend *backuppb.StorageBacke func (rc *Client) InitClients(backend *backuppb.StorageBackend, isRawKvMode bool) { metaClient := NewSplitClient(rc.pdClient, rc.tlsConf, isRawKvMode) importCli := NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) - rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode, rc.rateLimit) + rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode) } func (rc *Client) SetRawKVClient(c *RawKVBatchClient) { @@ -582,8 +582,8 @@ func (rc *Client) CreateTables( newTables = append(newTables, et.Table) } // Let's ensure that it won't break the original order. - sort.Slice(newTables, func(i, j int) bool { - return tbMapping[newTables[i].Name.String()] < tbMapping[newTables[j].Name.String()] + slices.SortFunc(newTables, func(i, j *model.TableInfo) bool { + return tbMapping[i.Name.String()] < tbMapping[j.Name.String()] }) select { @@ -826,8 +826,8 @@ func (rc *Client) createTablesInWorkerPool(ctx context.Context, dom *domain.Doma // ExecDDLs executes the queries of the ddl jobs. func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error { // Sort the ddl jobs by schema version in ascending order. 
- sort.Slice(ddlJobs, func(i, j int) bool { - return ddlJobs[i].BinlogInfo.SchemaVersion < ddlJobs[j].BinlogInfo.SchemaVersion + slices.SortFunc(ddlJobs, func(i, j *model.Job) bool { + return i.BinlogInfo.SchemaVersion < j.BinlogInfo.SchemaVersion }) for _, job := range ddlJobs { @@ -843,17 +843,50 @@ func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error { return nil } -func (rc *Client) setSpeedLimit(ctx context.Context) error { - if !rc.hasSpeedLimited && rc.rateLimit != 0 { +// Mock the call of setSpeedLimit function +func MockCallSetSpeedLimit(ctx context.Context, fakeImportClient ImporterClient, rc *Client, concurrency uint) error { + rc.SetRateLimit(42) + rc.SetConcurrency(concurrency) + rc.hasSpeedLimited = false + rc.fileImporter = NewFileImporter(nil, fakeImportClient, nil, false) + return rc.setSpeedLimit(ctx, rc.rateLimit) +} + +func (rc *Client) ResetSpeedLimit(ctx context.Context) error { + rc.hasSpeedLimited = false + err := rc.setSpeedLimit(ctx, 0) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (rc *Client) setSpeedLimit(ctx context.Context, rateLimit uint64) error { + if !rc.hasSpeedLimited { stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return errors.Trace(err) } + + eg, ectx := errgroup.WithContext(ctx) for _, store := range stores { - err = rc.fileImporter.setDownloadSpeedLimit(ctx, store.GetId()) - if err != nil { + if err := ectx.Err(); err != nil { return errors.Trace(err) } + + finalStore := store + rc.workerPool.ApplyOnErrorGroup(eg, + func() error { + err = rc.fileImporter.setDownloadSpeedLimit(ectx, finalStore.GetId(), rateLimit) + if err != nil { + return errors.Trace(err) + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return errors.Trace(err) } rc.hasSpeedLimited = true } @@ -926,7 +959,7 @@ func (rc *Client) RestoreSSTFiles( } eg, ectx := errgroup.WithContext(ctx) - err = rc.setSpeedLimit(ctx) + err = rc.setSpeedLimit(ctx, rc.rateLimit) if err != nil { return errors.Trace(err) } @@ -1050,38 +1083,52 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo } bfConf := backoff.DefaultConfig bfConf.MaxDelay = time.Second * 3 + + eg, ectx := errgroup.WithContext(ctx) for _, store := range stores { - opt := grpc.WithInsecure() - if rc.tlsConf != nil { - opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) - } - gctx, cancel := context.WithTimeout(ctx, time.Second*5) - connection, err := grpc.DialContext( - gctx, - store.GetAddress(), - opt, - grpc.WithBlock(), - grpc.FailOnNonTempDialError(true), - grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}), - // we don't need to set keepalive timeout here, because the connection lives - // at most 5s. (shorter than minimal value for keepalive time!) 
- ) - cancel() - if err != nil { + if err := ectx.Err(); err != nil { return errors.Trace(err) } - client := import_sstpb.NewImportSSTClient(connection) - _, err = client.SwitchMode(ctx, &import_sstpb.SwitchModeRequest{ - Mode: mode, - }) - if err != nil { - return errors.Trace(err) - } - err = connection.Close() - if err != nil { - log.Error("close grpc connection failed in switch mode", zap.Error(err)) - continue - } + + finalStore := store + rc.workerPool.ApplyOnErrorGroup(eg, + func() error { + opt := grpc.WithInsecure() + if rc.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) + } + gctx, cancel := context.WithTimeout(ectx, time.Second*5) + connection, err := grpc.DialContext( + gctx, + finalStore.GetAddress(), + opt, + grpc.WithBlock(), + grpc.FailOnNonTempDialError(true), + grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}), + // we don't need to set keepalive timeout here, because the connection lives + // at most 5s. (shorter than minimal value for keepalive time!) + ) + cancel() + if err != nil { + return errors.Trace(err) + } + client := import_sstpb.NewImportSSTClient(connection) + _, err = client.SwitchMode(ctx, &import_sstpb.SwitchModeRequest{ + Mode: mode, + }) + if err != nil { + return errors.Trace(err) + } + err = connection.Close() + if err != nil { + log.Error("close grpc connection failed in switch mode", zap.Error(err)) + } + return nil + }) + } + + if err = eg.Wait(); err != nil { + return errors.Trace(err) } return nil } @@ -1923,6 +1970,7 @@ func (rc *Client) GenGlobalID(ctx context.Context) (int64, error) { var id int64 storage := rc.GetDomain().Store() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) err := kv.RunInNewTxn( ctx, storage, @@ -1942,6 +1990,7 @@ func (rc *Client) GenGlobalIDs(ctx context.Context, n int) ([]int64, error) { ids := make([]int64, 0) storage := rc.GetDomain().Store() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) err := kv.RunInNewTxn( ctx, storage, @@ -1961,6 +2010,7 @@ func (rc *Client) UpdateSchemaVersion(ctx context.Context) error { storage := rc.GetDomain().Store() var schemaVersion int64 + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) if err := kv.RunInNewTxn( ctx, storage, diff --git a/br/pkg/restore/client_test.go b/br/pkg/restore/client_test.go index 334ac67387f5d..08e57e83a7095 100644 --- a/br/pkg/restore/client_test.go +++ b/br/pkg/restore/client_test.go @@ -4,11 +4,15 @@ package restore_test import ( "context" + "fmt" "math" + "sort" "strconv" + "sync" "testing" "time" + "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/gluetidb" "github.com/pingcap/tidb/br/pkg/metautil" @@ -240,3 +244,110 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) { require.Nil(t, tables[i].Info.TiFlashReplica) } } + +// Mock ImporterClient interface +type FakeImporterClient struct { + restore.ImporterClient +} + +// Record the stores that have communicated +type RecordStores struct { + mu sync.Mutex + stores []uint64 +} + +func NewRecordStores() RecordStores { + return RecordStores{stores: make([]uint64, 0)} +} + +func (r *RecordStores) put(id uint64) { + r.mu.Lock() + defer r.mu.Unlock() + r.stores = append(r.stores, id) +} + +func (r *RecordStores) sort() { + sort.Slice(r.stores, func(i, j int) bool { + return r.stores[i] < r.stores[j] + }) +} + +var recordStores RecordStores + +const ( + SET_SPEED_LIMIT_ERROR = 999999 + WORKING_TIME = 100 +) + +func (fakeImportCli FakeImporterClient) 
SetDownloadSpeedLimit( + ctx context.Context, + storeID uint64, + req *import_sstpb.SetDownloadSpeedLimitRequest, +) (*import_sstpb.SetDownloadSpeedLimitResponse, error) { + if storeID == SET_SPEED_LIMIT_ERROR { + return nil, fmt.Errorf("storeID:%v ERROR.", storeID) + } + + time.Sleep(WORKING_TIME * time.Millisecond) // simulate doing 100 ms work + recordStores.put(storeID) + return nil, nil +} + +func TestSetSpeedLimit(t *testing.T) { + mockStores := []*metapb.Store{ + {Id: 1}, + {Id: 2}, + {Id: 3}, + {Id: 4}, + {Id: 5}, + {Id: 6}, + {Id: 7}, + {Id: 8}, + {Id: 9}, + {Id: 10}, + } + + // 1. The cost of concurrent communication is expected to be less than the cost of serial communication. + client := restore.NewRestoreClient(fakePDClient{ + stores: mockStores, + }, nil, defaultKeepaliveCfg, false) + ctx := context.Background() + + recordStores = NewRecordStores() + start := time.Now() + err := restore.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 10) + cost := time.Since(start) + require.NoError(t, err) + + recordStores.sort() + t.Logf("Total Cost: %v\n", cost) + t.Logf("Has Communicated: %v\n", recordStores.stores) + + serialCost := len(mockStores) * WORKING_TIME + require.Less(t, cost, time.Duration(serialCost)*time.Millisecond) + require.Equal(t, len(mockStores), len(recordStores.stores)) + for i := 0; i < len(recordStores.stores); i++ { + require.Equal(t, mockStores[i].Id, recordStores.stores[i]) + } + + // 2. Expect the number of communicated stores to be less than the length of the mockStore + // Because subsequent unstarted communications are aborted when an error is encountered. + recordStores = NewRecordStores() + mockStores[5].Id = SET_SPEED_LIMIT_ERROR // setting a fault store + client = restore.NewRestoreClient(fakePDClient{ + stores: mockStores, + }, nil, defaultKeepaliveCfg, false) + + // Concurrency needs to be less than the number of stores + err = restore.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 2) + require.Error(t, err) + t.Log(err) + + recordStores.sort() + sort.Slice(mockStores, func(i, j int) bool { return mockStores[i].Id < mockStores[j].Id }) + t.Logf("Has Communicated: %v\n", recordStores.stores) + require.Less(t, len(recordStores.stores), len(mockStores)) + for i := 0; i < len(recordStores.stores); i++ { + require.Equal(t, mockStores[i].Id, recordStores.stores[i]) + } +} diff --git a/br/pkg/restore/db.go b/br/pkg/restore/db.go index 377b2ed5dd78f..8b5c619bd1e55 100644 --- a/br/pkg/restore/db.go +++ b/br/pkg/restore/db.go @@ -5,7 +5,6 @@ package restore import ( "context" "fmt" - "sort" "sync" "github.com/pingcap/errors" @@ -18,6 +17,7 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/variable" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // DB is a TiDB instance, not thread-safe. @@ -376,8 +376,8 @@ func (db *DB) ensureTablePlacementPolicies(ctx context.Context, tableInfo *model // FilterDDLJobs filters ddl jobs. func FilterDDLJobs(allDDLJobs []*model.Job, tables []*metautil.Table) (ddlJobs []*model.Job) { // Sort the ddl jobs by schema version in descending order. 
- sort.Slice(allDDLJobs, func(i, j int) bool { - return allDDLJobs[i].BinlogInfo.SchemaVersion > allDDLJobs[j].BinlogInfo.SchemaVersion + slices.SortFunc(allDDLJobs, func(i, j *model.Job) bool { + return i.BinlogInfo.SchemaVersion > j.BinlogInfo.SchemaVersion }) dbs := getDatabases(tables) for _, db := range dbs { diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index eb29b63b47f2f..63ac21f4b2e1c 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -238,7 +238,6 @@ type FileImporter struct { metaClient SplitClient importClient ImporterClient backend *backuppb.StorageBackend - rateLimit uint64 isRawKvMode bool rawStartKey []byte @@ -252,14 +251,12 @@ func NewFileImporter( importClient ImporterClient, backend *backuppb.StorageBackend, isRawKvMode bool, - rateLimit uint64, ) FileImporter { return FileImporter{ metaClient: metaClient, backend: backend, importClient: importClient, isRawKvMode: isRawKvMode, - rateLimit: rateLimit, } } @@ -499,9 +496,9 @@ func (importer *FileImporter) ImportSSTFiles( return errors.Trace(err) } -func (importer *FileImporter) setDownloadSpeedLimit(ctx context.Context, storeID uint64) error { +func (importer *FileImporter) setDownloadSpeedLimit(ctx context.Context, storeID, rateLimit uint64) error { req := &import_sstpb.SetDownloadSpeedLimitRequest{ - SpeedLimit: importer.rateLimit, + SpeedLimit: rateLimit, } _, err := importer.importClient.SetDownloadSpeedLimit(ctx, storeID, req) return errors.Trace(err) diff --git a/br/pkg/restore/rawkv_client.go b/br/pkg/restore/rawkv_client.go index 4541261a05be8..2495a8a2922ce 100644 --- a/br/pkg/restore/rawkv_client.go +++ b/br/pkg/restore/rawkv_client.go @@ -64,7 +64,7 @@ func NewRawKVBatchClient( // Close closes the RawKVBatchClient. func (c *RawKVBatchClient) Close() { - c.rawkvClient.Close() + _ = c.rawkvClient.Close() } // SetColumnFamily set the columnFamily for the client. 
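The setSpeedLimit and switchTiKVMode rewrites in br/pkg/restore/client.go above replace a serial per-store loop with an errgroup fan-out driven by the restore worker pool (the rate limit is now passed down to setDownloadSpeedLimit instead of being stored on FileImporter), and TestSetSpeedLimit above checks both the speed-up and the early abort once a store fails. A minimal sketch of that shape, using plain eg.Go in place of workerPool.ApplyOnErrorGroup and a stand-in setLimit call, so it illustrates the pattern rather than the actual BR code:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// setLimit stands in for fileImporter.setDownloadSpeedLimit: one RPC per store.
func setLimit(ctx context.Context, storeID uint64) error {
	select {
	case <-time.After(100 * time.Millisecond): // simulate the per-store work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func setSpeedLimitAll(ctx context.Context, storeIDs []uint64) error {
	eg, ectx := errgroup.WithContext(ctx)
	for _, id := range storeIDs {
		// Stop scheduling further stores once any goroutine has failed,
		// mirroring the ectx.Err() check added in the real loop.
		if err := ectx.Err(); err != nil {
			return err
		}
		id := id // capture the loop variable, as finalStore does above
		eg.Go(func() error {
			return setLimit(ectx, id)
		})
	}
	// The first error from any store surfaces here, as with eg.Wait() above.
	return eg.Wait()
}

func main() {
	start := time.Now()
	err := setSpeedLimitAll(context.Background(), []uint64{1, 2, 3, 4, 5})
	// With five stores running concurrently this finishes in roughly one
	// 100ms step rather than five, which is what TestSetSpeedLimit asserts.
	fmt.Println(err, time.Since(start))
}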
diff --git a/br/pkg/restore/rawkv_client_test.go b/br/pkg/restore/rawkv_client_test.go index 1864aca1b0a77..1458ce04ff822 100644 --- a/br/pkg/restore/rawkv_client_test.go +++ b/br/pkg/restore/rawkv_client_test.go @@ -12,10 +12,9 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/restore" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/util/codec" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/rawkv" - - "github.com/pingcap/tidb/util/codec" ) // fakeRawkvClient is a mock for rawkv.client diff --git a/br/pkg/storage/azblob.go b/br/pkg/storage/azblob.go index 08f25cbe06bf1..24c3b06930025 100644 --- a/br/pkg/storage/azblob.go +++ b/br/pkg/storage/azblob.go @@ -12,15 +12,14 @@ import ( "path" "strings" - "github.com/google/uuid" - "github.com/spf13/pflag" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/google/uuid" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/spf13/pflag" "go.uber.org/zap" ) diff --git a/br/pkg/storage/compress.go b/br/pkg/storage/compress.go index 34e4796b34810..fc0976420996b 100644 --- a/br/pkg/storage/compress.go +++ b/br/pkg/storage/compress.go @@ -8,7 +8,6 @@ import ( "io" "github.com/pingcap/errors" - berrors "github.com/pingcap/tidb/br/pkg/errors" ) diff --git a/br/pkg/storage/local.go b/br/pkg/storage/local.go index c7c16bbc7259e..0004648899aa8 100644 --- a/br/pkg/storage/local.go +++ b/br/pkg/storage/local.go @@ -108,6 +108,7 @@ func (l *LocalStorage) URI() string { // Open a Reader by file path, path is a relative path to base path. func (l *LocalStorage) Open(ctx context.Context, path string) (ExternalFileReader, error) { + //nolint: gosec return os.Open(filepath.Join(l.base, path)) } diff --git a/br/pkg/storage/s3.go b/br/pkg/storage/s3.go index b75da76505123..3bdcf7cab4d2b 100644 --- a/br/pkg/storage/s3.go +++ b/br/pkg/storage/s3.go @@ -27,11 +27,10 @@ import ( "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" - "github.com/spf13/pflag" - "go.uber.org/zap" - berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/spf13/pflag" + "go.uber.org/zap" ) const ( diff --git a/br/pkg/storage/writer.go b/br/pkg/storage/writer.go index e029b553952c2..55fb4e8ac319b 100644 --- a/br/pkg/storage/writer.go +++ b/br/pkg/storage/writer.go @@ -159,7 +159,7 @@ func (u *bufferedWriter) Write(ctx context.Context, p []byte) (int, error) { continue } } - u.buf.Flush() + _ = u.buf.Flush() err := u.uploadChunk(ctx) if err != nil { return 0, errors.Trace(err) diff --git a/br/pkg/stream/client.go b/br/pkg/stream/client.go index 5eadee73af36d..3ae88605ca7f1 100644 --- a/br/pkg/stream/client.go +++ b/br/pkg/stream/client.go @@ -10,14 +10,13 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" - "github.com/pingcap/log" - "go.uber.org/zap" - backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/redact" "github.com/pingcap/tidb/kv" clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" ) // MetaDataClient is the client for operations over metadata. 
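Many of the smaller hunks in this section exist only to satisfy the unchecked-error analysis wired into nogo by this change: a deferred Close gets a //nolint: errcheck directive (meta_manager.go, check_info.go), while non-deferred calls discard the result explicitly, as in _ = u.buf.Flush() and _ = c.rawkvClient.Close() above. A minimal, self-contained sketch of the two idioms; the helper and file path below are illustrative only, not BR code:

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

// readFirstLine mirrors the shape of the helpers touched above: the deferred
// Close is deliberately unchecked, so it carries a nolint directive instead
// of an error branch.
func readFirstLine(path string) (string, error) {
	f, err := os.Open(path) // the real code adds //nolint: gosec where path is external input
	if err != nil {
		return "", err
	}
	//nolint: errcheck
	defer f.Close()

	s := bufio.NewScanner(f)
	if !s.Scan() {
		return "", s.Err()
	}
	return s.Text(), nil
}

func main() {
	path := filepath.Join(os.TempDir(), "errcheck-demo.txt")
	// For non-deferred calls the change uses the other idiom: assign to blank.
	_ = os.WriteFile(path, []byte("hello\nworld\n"), 0o644)

	line, err := readFirstLine(path)
	fmt.Println(line, err) // "hello <nil>"
}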
diff --git a/br/pkg/stream/integration_test.go b/br/pkg/stream/integration_test.go index cbb253b1fcc60..92a465172afec 100644 --- a/br/pkg/stream/integration_test.go +++ b/br/pkg/stream/integration_test.go @@ -10,10 +10,9 @@ import ( "net/url" "testing" - berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/errors" "github.com/pingcap/log" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/stream" diff --git a/br/pkg/task/BUILD.bazel b/br/pkg/task/BUILD.bazel index 334babfd6d85c..9cdc23114f2be 100644 --- a/br/pkg/task/BUILD.bazel +++ b/br/pkg/task/BUILD.bazel @@ -57,6 +57,7 @@ go_library( "@io_etcd_go_etcd_client_v3//:client", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//keepalive", + "@org_golang_x_exp//slices", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", "@org_uber_go_zap//zapcore", diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index de94b2d385ad5..311f57e9de020 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -137,7 +137,7 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } - cfg.BackupTS, err = ParseTSString(backupTS) + cfg.BackupTS, err = ParseTSString(backupTS, false) if err != nil { return errors.Trace(err) } @@ -528,7 +528,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig } // ParseTSString port from tidb setSnapshotTS. -func ParseTSString(ts string) (uint64, error) { +func ParseTSString(ts string, tzCheck bool) (uint64, error) { if len(ts) == 0 { return 0, nil } @@ -540,6 +540,12 @@ func ParseTSString(ts string) (uint64, error) { sc := &stmtctx.StatementContext{ TimeZone: loc, } + if tzCheck { + tzIdx, _, _, _, _ := types.GetTimezone(ts) + if tzIdx < 0 { + return 0, errors.Errorf("must set timezone when using datetime format ts") + } + } t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp) if err != nil { return 0, errors.Trace(err) diff --git a/br/pkg/task/backup_test.go b/br/pkg/task/backup_test.go index ef3f33d27ee54..25d6de8ee66fb 100644 --- a/br/pkg/task/backup_test.go +++ b/br/pkg/task/backup_test.go @@ -16,21 +16,40 @@ func TestParseTSString(t *testing.T) { err error ) - ts, err = ParseTSString("") + ts, err = ParseTSString("", false) require.NoError(t, err) require.Zero(t, ts) - ts, err = ParseTSString("400036290571534337") + ts, err = ParseTSString("400036290571534337", false) require.NoError(t, err) require.Equal(t, uint64(400036290571534337), ts) - ts, err = ParseTSString("2021-01-01 01:42:23") + ts, err = ParseTSString("2021-01-01 01:42:23", false) require.NoError(t, err) localTime := time.Date(2021, time.Month(1), 1, 1, 42, 23, 0, time.Local) - localTimestamp := localTime.Unix() localTSO := uint64((localTimestamp << 18) * 1000) require.Equal(t, localTSO, ts) + + _, err = ParseTSString("2021-01-01 01:42:23", true) + require.Error(t, err) + require.Regexp(t, "must set timezone*", err.Error()) + + ts, err = ParseTSString("2021-01-01 01:42:23+00:00", true) + require.NoError(t, err) + localTime = time.Date(2021, time.Month(1), 1, 1, 42, 23, 0, time.UTC) + localTimestamp = localTime.Unix() + localTSO = uint64((localTimestamp << 18) * 1000) + require.Equal(t, localTSO, ts) + + ts, err = ParseTSString("2021-01-01 01:42:23+08:00", true) + require.NoError(t, err) + secondsEastOfUTC := int((8 * time.Hour).Seconds()) + beijing := time.FixedZone("Beijing Time", secondsEastOfUTC) 
+ localTime = time.Date(2021, time.Month(1), 1, 1, 42, 23, 0, beijing) + localTimestamp = localTime.Unix() + localTSO = uint64((localTimestamp << 18) * 1000) + require.Equal(t, localTSO, ts) } func TestParseCompressionType(t *testing.T) { diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index 1e6cbd2432065..3c7095bc1df4e 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -16,6 +16,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/restore" @@ -61,6 +62,7 @@ const ( defaultPDConcurrency = 1 defaultBatchFlushInterval = 16 * time.Second defaultFlagDdlBatchSize = 128 + resetSpeedLimitRetryTimes = 3 ) const ( @@ -182,14 +184,14 @@ func (cfg *RestoreConfig) ParseStreamRestoreFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } - if cfg.StartTS, err = ParseTSString(tsString); err != nil { + if cfg.StartTS, err = ParseTSString(tsString, true); err != nil { return errors.Trace(err) } tsString, err = flags.GetString(FlagStreamRestoreTS) if err != nil { return errors.Trace(err) } - if cfg.RestoreTS, err = ParseTSString(tsString); err != nil { + if cfg.RestoreTS, err = ParseTSString(tsString, true); err != nil { return errors.Trace(err) } @@ -614,6 +616,25 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf finish = dropToBlackhole(ctx, afterRestoreStream, errCh, updateCh) } + // Reset speed limit. ResetSpeedLimit must be called after client.InitBackupMeta has been called. + defer func() { + var resetErr error + // In future we may need a mechanism to set speed limit in ttl. like what we do in switchmode. 
TODO + for retry := 0; retry < resetSpeedLimitRetryTimes; retry++ { + resetErr = client.ResetSpeedLimit(ctx) + if resetErr != nil { + log.Warn("failed to reset speed limit, retry it", + zap.Int("retry time", retry), logutil.ShortError(resetErr)) + time.Sleep(time.Duration(retry+3) * time.Second) + continue + } + break + } + if resetErr != nil { + log.Error("failed to reset speed limit", zap.Error(resetErr)) + } + }() + select { case err = <-errCh: err = multierr.Append(err, multierr.Combine(restore.Exhaust(errCh)...)) diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 7d8b717f07e0d..3683854dc2169 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "sort" "strings" "time" @@ -48,6 +47,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" ) const ( @@ -189,7 +189,7 @@ func (cfg *StreamConfig) ParseStreamTruncateFromFlags(flags *pflag.FlagSet) erro if err != nil { return errors.Trace(err) } - if cfg.Until, err = ParseTSString(tsString); err != nil { + if cfg.Until, err = ParseTSString(tsString, true); err != nil { return errors.Trace(err) } if cfg.SkipPrompt, err = flags.GetBool(flagYes); err != nil { @@ -213,7 +213,7 @@ func (cfg *StreamConfig) ParseStreamStartFromFlags(flags *pflag.FlagSet) error { return errors.Trace(err) } - if cfg.StartTS, err = ParseTSString(tsString); err != nil { + if cfg.StartTS, err = ParseTSString(tsString, true); err != nil { return errors.Trace(err) } @@ -222,7 +222,7 @@ func (cfg *StreamConfig) ParseStreamStartFromFlags(flags *pflag.FlagSet) error { return errors.Trace(err) } - if cfg.EndTS, err = ParseTSString(tsString); err != nil { + if cfg.EndTS, err = ParseTSString(tsString, true); err != nil { return errors.Trace(err) } @@ -386,8 +386,8 @@ func (s *streamMgr) buildObserveRanges(ctx context.Context) ([]kv.KeyRange, erro mRange := stream.BuildObserveMetaRange() rs := append([]kv.KeyRange{*mRange}, dRanges...) 
- sort.Slice(rs, func(i, j int) bool { - return bytes.Compare(rs[i].StartKey, rs[j].StartKey) < 0 + slices.SortFunc(rs, func(i, j kv.KeyRange) bool { + return bytes.Compare(i.StartKey, j.StartKey) < 0 }) return rs, nil diff --git a/br/pkg/task/stream_test.go b/br/pkg/task/stream_test.go index 3b13f8657c0e2..3313351132585 100644 --- a/br/pkg/task/stream_test.go +++ b/br/pkg/task/stream_test.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/conn" "github.com/pingcap/tidb/br/pkg/pdutil" diff --git a/br/pkg/trace/BUILD.bazel b/br/pkg/trace/BUILD.bazel index f02690cdb63ab..f73c227abc668 100644 --- a/br/pkg/trace/BUILD.bazel +++ b/br/pkg/trace/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "@com_github_pingcap_log//:log", "@com_sourcegraph_sourcegraph_appdash//:appdash", "@com_sourcegraph_sourcegraph_appdash//opentracing", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/br/pkg/trace/tracing.go b/br/pkg/trace/tracing.go index a7b11db06a867..ff961367ff282 100644 --- a/br/pkg/trace/tracing.go +++ b/br/pkg/trace/tracing.go @@ -7,7 +7,6 @@ import ( "fmt" "os" "path/filepath" - "sort" "text/tabwriter" "time" @@ -15,6 +14,7 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pingcap/log" "go.uber.org/zap" + "golang.org/x/exp/slices" "sourcegraph.com/sourcegraph/appdash" traceImpl "sourcegraph.com/sourcegraph/appdash/opentracing" ) @@ -88,12 +88,12 @@ func dfsTree(t *appdash.Trace, prefix string, isLast bool, tub *tabby.Tabby) { tub.AddLine(prefix+suffix+t.Span.Name(), start.Format("15:04:05.000000"), duration.String()) // Sort events by their start time - sort.Slice(t.Sub, func(i, j int) bool { + slices.SortFunc(t.Sub, func(i, j *appdash.Trace) bool { var istart, jstart time.Time - if ievent, err := t.Sub[i].TimespanEvent(); err == nil { + if ievent, err := i.TimespanEvent(); err == nil { istart = ievent.Start() } - if jevent, err := t.Sub[j].TimespanEvent(); err == nil { + if jevent, err := j.TimespanEvent(); err == nil { jstart = jevent.Start() } return istart.Before(jstart) diff --git a/br/tests/lightning_auto_random_default/run.sh b/br/tests/lightning_auto_random_default/run.sh index c54ca6ac7ee0e..41b9798de4560 100644 --- a/br/tests/lightning_auto_random_default/run.sh +++ b/br/tests/lightning_auto_random_default/run.sh @@ -40,10 +40,10 @@ for backend in tidb local; do check_contains 'inc: 6' NEXT_AUTO_RAND_VAL=7 else - check_contains 'inc: 6' - check_contains 'inc: 7' - check_contains 'inc: 8' - NEXT_AUTO_RAND_VAL=9 + check_contains 'inc: 25' + check_contains 'inc: 26' + check_contains 'inc: 27' + NEXT_AUTO_RAND_VAL=28 fi # tidb backend randomly generate the auto-random bit for each statement, so with 2 statements, diff --git a/br/tests/lightning_realloc_id/config.toml b/br/tests/lightning_realloc_id/config.toml deleted file mode 100644 index f32593b43b798..0000000000000 --- a/br/tests/lightning_realloc_id/config.toml +++ /dev/null @@ -1,3 +0,0 @@ -[tikv-importer] -incremental-import = true -backend = 'local' diff --git a/br/tests/lightning_realloc_id/config1.toml b/br/tests/lightning_realloc_id/config1.toml deleted file mode 100644 index d2152b47c922a..0000000000000 --- a/br/tests/lightning_realloc_id/config1.toml +++ /dev/null @@ -1,2 +0,0 @@ -[tikv-importer] -backend = 'local' diff --git a/br/tests/lightning_realloc_id/config2.toml b/br/tests/lightning_realloc_id/config2.toml deleted file mode 100644 index 
f32593b43b798..0000000000000 --- a/br/tests/lightning_realloc_id/config2.toml +++ /dev/null @@ -1,3 +0,0 @@ -[tikv-importer] -incremental-import = true -backend = 'local' diff --git a/br/tests/lightning_realloc_id/data/db-schema-create.sql b/br/tests/lightning_realloc_id/data/db-schema-create.sql deleted file mode 100644 index c88b0e3150e76..0000000000000 --- a/br/tests/lightning_realloc_id/data/db-schema-create.sql +++ /dev/null @@ -1 +0,0 @@ -create database db; \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data/db.test-schema.sql b/br/tests/lightning_realloc_id/data/db.test-schema.sql deleted file mode 100644 index 0490cd81e1c2e..0000000000000 --- a/br/tests/lightning_realloc_id/data/db.test-schema.sql +++ /dev/null @@ -1,4 +0,0 @@ -create table db.test( - id int auto_increment unique key, - a int primary key -); \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data/db.test.000000000.csv b/br/tests/lightning_realloc_id/data/db.test.000000000.csv deleted file mode 100644 index f2ce71fb561c5..0000000000000 --- a/br/tests/lightning_realloc_id/data/db.test.000000000.csv +++ /dev/null @@ -1,11 +0,0 @@ -a -100 -101 -102 -103 -104 -105 -106 -107 -108 -109 \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data/db.test.000000001.sql b/br/tests/lightning_realloc_id/data/db.test.000000001.sql deleted file mode 100644 index 611b5f5dbeba6..0000000000000 --- a/br/tests/lightning_realloc_id/data/db.test.000000001.sql +++ /dev/null @@ -1,11 +0,0 @@ -insert into db.test(a) values -(200), -(201), -(202), -(203), -(204), -(205), -(206), -(207), -(208), -(209); \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data1/db-schema-create.sql b/br/tests/lightning_realloc_id/data1/db-schema-create.sql deleted file mode 100644 index c88b0e3150e76..0000000000000 --- a/br/tests/lightning_realloc_id/data1/db-schema-create.sql +++ /dev/null @@ -1 +0,0 @@ -create database db; \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data1/db.test-schema.sql b/br/tests/lightning_realloc_id/data1/db.test-schema.sql deleted file mode 100644 index 0490cd81e1c2e..0000000000000 --- a/br/tests/lightning_realloc_id/data1/db.test-schema.sql +++ /dev/null @@ -1,4 +0,0 @@ -create table db.test( - id int auto_increment unique key, - a int primary key -); \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data1/db.test.000000000.csv b/br/tests/lightning_realloc_id/data1/db.test.000000000.csv deleted file mode 100644 index 70ae8fd5a20a7..0000000000000 --- a/br/tests/lightning_realloc_id/data1/db.test.000000000.csv +++ /dev/null @@ -1,11 +0,0 @@ -a -300 -301 -302 -303 -304 -305 -306 -307 -308 -309 \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data1/db.test.000000001.sql b/br/tests/lightning_realloc_id/data1/db.test.000000001.sql deleted file mode 100644 index 461cf4c3fccaf..0000000000000 --- a/br/tests/lightning_realloc_id/data1/db.test.000000001.sql +++ /dev/null @@ -1,11 +0,0 @@ -insert into db.test(a) values -(400), -(401), -(402), -(403), -(404), -(405), -(406), -(407), -(408), -(409); \ No newline at end of file diff --git a/br/tests/lightning_realloc_id/data2/db.test.000000000.csv b/br/tests/lightning_realloc_id/data2/db.test.000000000.csv deleted file mode 100644 index 12d1d9e0bc948..0000000000000 --- a/br/tests/lightning_realloc_id/data2/db.test.000000000.csv +++ /dev/null @@ -1,11 +0,0 @@ -a -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 \ No newline at end of file diff --git 
a/br/tests/lightning_realloc_id/run.sh b/br/tests/lightning_realloc_id/run.sh deleted file mode 100644 index eead3b2fc1f33..0000000000000 --- a/br/tests/lightning_realloc_id/run.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/sh -# -# Copyright 2022 PingCAP, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Basic check for whether partitioned tables work. - -set -eu -check_cluster_version 4 0 0 'local backend' -LOG_FILE1="$TEST_DIR/lightning-realloc-import1.log" -LOG_FILE2="$TEST_DIR/lightning-realloc-import2.log" -LOG_FILE3="$TEST_DIR/lightning-realloc-import3.log" - -function run_lightning_expecting_fail() { - set +e - run_lightning "$@" - ERRCODE=$? - set -e - [ "$ERRCODE" != 0 ] -} - -function check_result() { - run_sql 'SHOW DATABASES;' - check_contains 'Database: db'; - run_sql 'SHOW TABLES IN db;' - check_contains 'Tables_in_db: test' - run_sql 'SELECT count(*) FROM db.test;' - check_contains 'count(*): 20' - run_sql 'SELECT * FROM db.test;' - check_contains 'id: 15' - check_contains 'id: 20' -} - -function parallel_import() { - run_lightning -d "tests/$TEST_NAME/data" \ - --sorted-kv-dir "$TEST_DIR/lightning_realloc_import.sorted1" \ - --log-file "$LOG_FILE1" \ - --config "tests/$TEST_NAME/config.toml" & - pid1="$!" - run_lightning -d "tests/$TEST_NAME/data1" \ - --sorted-kv-dir "$TEST_DIR/lightning_realloc_import.sorted2" \ - --log-file "$LOG_FILE2" \ - --config "tests/$TEST_NAME/config.toml" & - pid2="$!" - wait "$pid1" "$pid2" -} - -function overflow_import() { - run_sql 'create database if not exists db' - run_sql 'create table db.test(id int auto_increment primary key, a int)' - run_sql 'alter table db.test auto_increment=2147483640' # too few available rowID - echo "lightning stdout:" > "$TEST_DIR/sql_res.$TEST_NAME.txt" - run_lightning_expecting_fail -d "tests/$TEST_NAME/data2" \ - --sorted-kv-dir "$TEST_DIR/lightning_realloc_import.sorted3" \ - --log-file "$LOG_FILE3" \ - --config "tests/$TEST_NAME/config2.toml" 2>&1 | tee -a "$TEST_DIR/sql_res.$TEST_NAME.txt" - if ! 
grep -q "out of range" "$TEST_DIR/sql_res.$TEST_NAME.txt"; then - echo "TEST FAILED: OUTPUT DOES NOT CONTAIN 'out of range'" - exit 1 - fi -} - -function check_parallel_result() { - run_sql 'SHOW DATABASES;' - check_contains 'Database: db'; - run_sql 'SHOW TABLES IN db;' - check_contains 'Tables_in_db: test' - run_sql 'SELECT count(*) FROM db.test;' - check_contains 'count(*): 40' -} - -run_sql 'DROP DATABASE IF EXISTS db;' -export GO_FAILPOINTS='github.com/pingcap/tidb/br/pkg/lightning/mydump/MockInaccurateRowID=return(true)' -run_lightning --config "tests/$TEST_NAME/config1.toml" -check_result -run_sql 'DROP DATABASE IF EXISTS db;' -parallel_import -check_parallel_result -run_sql 'DROP DATABASE IF EXISTS db;' -overflow_import -run_sql 'DROP DATABASE IF EXISTS db;' -unset GO_FAILPOINTS \ No newline at end of file diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 6957be918b42b..396f4e943ed06 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -1,20 +1,59 @@ package(default_visibility = ["//visibility:public"]) load("@io_bazel_rules_go//go:def.bzl", "nogo") +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("//build/linter/staticcheck:def.bzl", "staticcheck_analyzers") +bool_flag( + name = "with_nogo_flag", + build_setting_default = False, + visibility = ["//visibility:public"], +) + +config_setting( + name = "with_nogo", + flag_values = { + ":with_nogo_flag": "true", + }, + visibility = ["//visibility:public"], +) + STATICHECK_ANALYZERS = [ + "S1000", + "S1001", "S1002", + "S1003", "S1004", + "S1005", + "S1006", "S1007", + "S1008", "S1009", "S1010", + "S1011", "S1012", + "S1016", + "S1017", + "S1018", "S1019", "S1020", "S1021", + "S1023", "S1024", + "S1025", + "S1028", + "S1029", "S1030", + "S1031", + "S1032", + "S1033", + "S1034", + "S1035", + "S1036", + "S1037", + "S1038", + "S1039", + "S1040", "SA2000", "SA2001", "SA2003", @@ -43,48 +82,58 @@ nogo( config = ":nogo_config.json", visibility = ["//visibility:public"], # must have public visibility deps = [ - # https://github.com/golang/go/issues/48525 - # "@com_github_timakin_bodyclose//passes/bodyclose:go_default_library", - "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library", - "@org_golang_x_tools//go/analysis/passes/assign:go_default_library", - "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library", - "@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library", - "@org_golang_x_tools//go/analysis/passes/bools:go_default_library", - # https://github.com/golang/go/issues/48525 - # "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", - "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library", - # https://github.com/bazelbuild/rules_go/issues/2396 - # "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library", - "@org_golang_x_tools//go/analysis/passes/composite:go_default_library", - "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library", - "@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library", - "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library", - "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library", - "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library", - "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library", - "@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library", - "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library", - 
"@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library", - "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library", - "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library", - #"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library", - "@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library", - "@org_golang_x_tools//go/analysis/passes/printf:go_default_library", - "@org_golang_x_tools//go/analysis/passes/shift:go_default_library", - "@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library", - "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library", - "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library", - "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library", - "@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library", - "@org_golang_x_tools//go/analysis/passes/tests:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library", - "//build/linter/durationcheck:durationcheck", - "//build/linter/exportloopref:exportloopref", - "//build/linter/gofmt:gofmt", - "//build/linter/ineffassign:ineffassign", - "//build/linter/prealloc:prealloc", - ] + staticcheck_analyzers(STATICHECK_ANALYZERS), + # https://github.com/golang/go/issues/48525 + # "@com_github_timakin_bodyclose//passes/bodyclose:go_default_library", + "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library", + "@org_golang_x_tools//go/analysis/passes/assign:go_default_library", + "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library", + "@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library", + "@org_golang_x_tools//go/analysis/passes/bools:go_default_library", + # https://github.com/golang/go/issues/48525 + # "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", + "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library", + # https://github.com/bazelbuild/rules_go/issues/2396 + # "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library", + "@org_golang_x_tools//go/analysis/passes/composite:go_default_library", + "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library", + "@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library", + "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library", + "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library", + "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library", + "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library", + "@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library", + "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library", + "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library", + "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library", + "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library", + #"@org_golang_x_tools//go/analysis/passes/nilness:go_default_library", + "@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library", + "@org_golang_x_tools//go/analysis/passes/printf:go_default_library", + "@org_golang_x_tools//go/analysis/passes/shift:go_default_library", + 
"@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library", + "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library", + "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library", + "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library", + "@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library", + "@org_golang_x_tools//go/analysis/passes/tests:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library", + "//build/linter/asciicheck:asciicheck", + "//build/linter/durationcheck:durationcheck", + "//build/linter/exportloopref:exportloopref", + "//build/linter/gofmt:gofmt", + "//build/linter/gci:gci", + "//build/linter/gosec:gosec", + "//build/linter/ineffassign:ineffassign", + "//build/linter/misspell:misspell", + "//build/linter/prealloc:prealloc", + "//build/linter/predeclared:predeclared", + "//build/linter/unconvert:unconvert", + ] + staticcheck_analyzers(STATICHECK_ANALYZERS) + + select({ + "//build:with_nogo": ["//build/linter/errcheck:errcheck"], + "//conditions:default": [], + }), ) diff --git a/build/linter/asciicheck/BUILD.bazel b/build/linter/asciicheck/BUILD.bazel new file mode 100644 index 0000000000000..f686bc348ca2c --- /dev/null +++ b/build/linter/asciicheck/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "asciicheck", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/asciicheck", + visibility = ["//visibility:public"], + deps = ["@com_github_tdakkota_asciicheck//:asciicheck"], +) diff --git a/build/linter/asciicheck/analysis.go b/build/linter/asciicheck/analysis.go new file mode 100644 index 0000000000000..c78c5db3fd066 --- /dev/null +++ b/build/linter/asciicheck/analysis.go @@ -0,0 +1,20 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package asciicheck + +import "github.com/tdakkota/asciicheck" + +// Analyzer is the analyzer struct of asciicheck. 
+var Analyzer = asciicheck.NewAnalyzer() diff --git a/build/linter/errcheck/BUILD.bazel b/build/linter/errcheck/BUILD.bazel new file mode 100644 index 0000000000000..98383d61766a5 --- /dev/null +++ b/build/linter/errcheck/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "errcheck", + srcs = ["analyzer.go"], + embedsrcs = ["errcheck_excludes.txt"], + importpath = "github.com/pingcap/tidb/build/linter/errcheck", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_kisielk_errcheck//errcheck", + ], +) diff --git a/build/linter/errcheck/analyzer.go b/build/linter/errcheck/analyzer.go new file mode 100644 index 0000000000000..ea5049689059e --- /dev/null +++ b/build/linter/errcheck/analyzer.go @@ -0,0 +1,38 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errcheck + +import ( + "embed" + "log" + + "github.com/kisielk/errcheck/errcheck" + "github.com/pingcap/tidb/build/linter/util" +) + +// Analyzer is the analyzer struct of errcheck. +var Analyzer = errcheck.Analyzer + +//go:embed errcheck_excludes.txt +var excludesContent embed.FS + +func init() { + data, _ := excludesContent.ReadFile("errcheck_excludes.txt") + err := Analyzer.Flags.Set("excludes", string(data)) + if err != nil { + log.Fatal(err) + } + util.SkipAnalyzer(Analyzer) +} diff --git a/build/linter/errcheck/errcheck_excludes.txt b/build/linter/errcheck/errcheck_excludes.txt new file mode 100644 index 0000000000000..c6492845207f9 --- /dev/null +++ b/build/linter/errcheck/errcheck_excludes.txt @@ -0,0 +1,20 @@ +fmt.Fprint +fmt.Fprintf +fmt.Fprintln +fmt.Sscanf +(*os.File).Close +(io.Closer).Close +(net.Conn).Close +(*cloud.google.com/go/storage.Reader).Close +(*strings.Builder).WriteByte +(*strings.Builder).WriteRune +(*strings.Builder).WriteString +(*strings.Builder).Write +(*bufio.Writer).Flush +(*google.golang.org/grpc.ClientConn).Close +(*github.com/pingcap/tidb/util/sqlexec.RecordSet).Close +(*github.com/pingcap/tidb/br/pkg/lightning/mydump.blockParser).Close +(*github.com/pingcap/tidb/kv.Storage).Close +(*database/sql.DB).Close +(*database/sql.Rows).Close +(*database/sql.Stmt).Close diff --git a/build/linter/gci/BUILD.bazel b/build/linter/gci/BUILD.bazel new file mode 100644 index 0000000000000..9318955d516bd --- /dev/null +++ b/build/linter/gci/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gci", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/gci", + visibility = ["//visibility:public"], + deps = [ + "@com_github_daixiang0_gci//pkg/configuration", + "@com_github_daixiang0_gci//pkg/gci", + "@org_golang_x_tools//go/analysis", + ], +) diff --git a/build/linter/gci/analysis.go b/build/linter/gci/analysis.go new file mode 100644 index 0000000000000..6ac0854302160 --- /dev/null +++ b/build/linter/gci/analysis.go @@ -0,0 +1,65 @@ +// Copyright 2022 PingCAP, Inc. 
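(Aside, not part of the patch: the errcheck wrapper above embeds errcheck_excludes.txt and passes its contents to the analyzer through a new "excludes" flag; the com_github_kisielk_errcheck.patch further below teaches errcheck to split that string on newlines, skipping blank lines and "//" comments. A minimal standalone sketch of that parsing step, with the helper name parseExcludes made up purely for illustration:)

```go
package main

import (
	"fmt"
	"strings"
)

// parseExcludes mirrors the hunk the patch adds to errcheck/analyzer.go:
// every non-empty line that does not start with "//" becomes an excluded
// function signature.
func parseExcludes(contents string) map[string]bool {
	exclude := map[string]bool{}
	for _, name := range strings.Split(contents, "\n") {
		if strings.HasPrefix(name, "//") || name == "" {
			continue
		}
		exclude[name] = true
	}
	return exclude
}

func main() {
	data := "// closers whose errors are deliberately ignored\nfmt.Fprintf\n(*os.File).Close\n"
	fmt.Println(parseExcludes(data)) // map[(*os.File).Close:true fmt.Fprintf:true]
}
```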
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gci + +import ( + "fmt" + "sync" + + "github.com/daixiang0/gci/pkg/configuration" + "github.com/daixiang0/gci/pkg/gci" + "golang.org/x/tools/go/analysis" +) + +// Analyzer is the analyzer struct of gci. +var Analyzer = &analysis.Analyzer{ + Name: "gci", + Doc: "Gci controls golang package import order and makes it always deterministic.", + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + fileNames := make([]string, 0, len(pass.Files)) + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + var rawCfg gci.GciStringConfiguration + rawCfg.Cfg = configuration.FormatterConfiguration{ + NoInlineComments: false, + NoPrefixComments: false, + Debug: false, + } + cfg, _ := rawCfg.Parse() + var diffs []string + var lock sync.Mutex + err := gci.DiffFormattedFilesToArray(fileNames, *cfg, &diffs, &lock) + if err != nil { + return nil, err + } + + for _, diff := range diffs { + if diff == "" { + continue + } + + pass.Report(analysis.Diagnostic{ + Pos: 1, + Message: fmt.Sprintf("\n%s", diff), + }) + } + + return nil, nil +} diff --git a/build/linter/gosec/BUILD.bazel b/build/linter/gosec/BUILD.bazel new file mode 100644 index 0000000000000..e0a64e71a0af6 --- /dev/null +++ b/build/linter/gosec/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gosec", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/gosec", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_golangci_golangci_lint//pkg/result", + "@com_github_golangci_gosec//:gosec", + "@com_github_golangci_gosec//rules", + "@org_golang_x_tools//go/analysis", + "@org_golang_x_tools//go/loader", + ], +) diff --git a/build/linter/gosec/analysis.go b/build/linter/gosec/analysis.go new file mode 100644 index 0000000000000..2e93dbbdd3d59 --- /dev/null +++ b/build/linter/gosec/analysis.go @@ -0,0 +1,110 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "fmt" + "go/token" + "go/types" + "io/ioutil" + "log" + "strconv" + + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/gosec" + "github.com/golangci/gosec/rules" + "github.com/pingcap/tidb/build/linter/util" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" +) + +// Name is the name of the analyzer. 
+const Name = "gosec" + +// Analyzer is the analyzer struct of gosec. +var Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "Inspects source code for security problems", + Run: run, +} + +func init() { + util.SkipAnalyzer(Analyzer) +} + +func run(pass *analysis.Pass) (interface{}, error) { + gasConfig := gosec.NewConfig() + enabledRules := rules.Generate(func(id string) bool { + if id == "G104" || id == "G103" || id == "G101" || id == "G201" { + return true + } + return false + }) + logger := log.New(ioutil.Discard, "", 0) + analyzer := gosec.NewAnalyzer(gasConfig, logger) + analyzer.LoadRules(enabledRules.Builders()) + + var createdPkgs []*loader.PackageInfo + createdPkgs = append(createdPkgs, util.MakeFakeLoaderPackageInfo(pass)) + allPkgs := make(map[*types.Package]*loader.PackageInfo) + for _, pkg := range createdPkgs { + pkg := pkg + allPkgs[pkg.Pkg] = pkg + } + prog := &loader.Program{ + Fset: pass.Fset, + Imported: nil, // not used without .Created in any linter + Created: createdPkgs, // all initial packages + AllPackages: allPkgs, // all initial packages and their depndencies + } + + analyzer.ProcessProgram(prog) + issues, _ := analyzer.Report() + if len(issues) == 0 { + return nil, nil + } + severity, confidence := gosec.Low, gosec.Low + issues = filterIssues(issues, severity, confidence) + for _, i := range issues { + fileContent, tf, err := util.ReadFile(pass.Fset, i.File) + if err != nil { + panic(err) + } + text := fmt.Sprintf("[%s] %s: %s", Name, i.RuleID, i.What) // TODO: use severity and confidence + var r *result.Range + line, err := strconv.Atoi(i.Line) + if err != nil { + r = &result.Range{} + if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { + continue + } + line = r.From + } + + pass.Reportf(token.Pos(tf.Base()+util.FindOffset(string(fileContent), line, 1)), text) + } + + return nil, nil +} + +func filterIssues(issues []*gosec.Issue, severity, confidence gosec.Score) []*gosec.Issue { + res := make([]*gosec.Issue, 0) + for _, issue := range issues { + if issue.Severity >= severity && issue.Confidence >= confidence { + res = append(res, issue) + } + } + return res +} diff --git a/build/linter/misspell/BUILD.bazel b/build/linter/misspell/BUILD.bazel new file mode 100644 index 0000000000000..6dfdff3fc3c78 --- /dev/null +++ b/build/linter/misspell/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "misspell", + srcs = ["analyzer.go"], + importpath = "github.com/pingcap/tidb/build/linter/misspell", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_golangci_misspell//:misspell", + "@org_golang_x_tools//go/analysis", + ], +) diff --git a/build/linter/misspell/analyzer.go b/build/linter/misspell/analyzer.go new file mode 100644 index 0000000000000..3bf4dd16ae77c --- /dev/null +++ b/build/linter/misspell/analyzer.go @@ -0,0 +1,92 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package misspell + +import ( + "fmt" + "go/token" + + "github.com/golangci/misspell" + "github.com/pingcap/tidb/build/linter/util" + "golang.org/x/tools/go/analysis" +) + +// Name is the name of the analyzer. +const Name = "misspell" + +// Analyzer is the analyzer struct of misspell. +var Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "Checks the spelling error in code", + Run: run, +} + +func init() { + util.SkipAnalyzer(Analyzer) +} + +// Misspell is the config of misspell. +type Misspell struct { + Locale string + IgnoreWords []string `mapstructure:"ignore-words"` +} + +func run(pass *analysis.Pass) (interface{}, error) { + r := misspell.Replacer{ + Replacements: misspell.DictMain, + } + + // Figure out regional variations + settings := &Misspell{ + Locale: "", + } + + if len(settings.IgnoreWords) != 0 { + r.RemoveRule(settings.IgnoreWords) + } + + r.Compile() + files := make([]string, 0, len(pass.Files)) + for _, file := range pass.Files { + pos := pass.Fset.PositionFor(file.Pos(), false) + files = append(files, pos.Filename) + } + for _, f := range files { + err := runOnFile(f, &r, pass) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +func runOnFile(fileName string, r *misspell.Replacer, pass *analysis.Pass) error { + fileContent, tf, err := util.ReadFile(pass.Fset, fileName) + if err != nil { + return fmt.Errorf("can't get file %s contents: %s", fileName, err) + } + + // use r.Replace, not r.ReplaceGo because r.ReplaceGo doesn't find + // issues inside strings: it searches only inside comments. r.Replace + // searches all words: it treats input as a plain text. A standalone misspell + // tool uses r.Replace by default. + _, diffs := r.Replace(string(fileContent)) + for _, diff := range diffs { + text := fmt.Sprintf("[%s] `%s` is a misspelling of `%s`", Name, diff.Original, diff.Corrected) + pass.Reportf(token.Pos(tf.Base()+util.FindOffset(string(fileContent), diff.Line, diff.Column)), text) + } + return nil +} diff --git a/build/linter/predeclared/BUILD.bazel b/build/linter/predeclared/BUILD.bazel new file mode 100644 index 0000000000000..5efbe386f0f13 --- /dev/null +++ b/build/linter/predeclared/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "predeclared", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/predeclared", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_nishanths_predeclared//passes/predeclared", + ], +) diff --git a/build/linter/predeclared/analysis.go b/build/linter/predeclared/analysis.go new file mode 100644 index 0000000000000..605b631553f24 --- /dev/null +++ b/build/linter/predeclared/analysis.go @@ -0,0 +1,27 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package predeclared + +import ( + "github.com/nishanths/predeclared/passes/predeclared" + "github.com/pingcap/tidb/build/linter/util" +) + +// Analyzer is the analyzer struct of predeclared. 
+var Analyzer = predeclared.Analyzer + +func init() { + util.SkipAnalyzer(Analyzer) +} diff --git a/build/linter/unconvert/BUILD.bazel b/build/linter/unconvert/BUILD.bazel new file mode 100644 index 0000000000000..23ececa09cd70 --- /dev/null +++ b/build/linter/unconvert/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "unconvert", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/unconvert", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_golangci_unconvert//:unconvert", + "@org_golang_x_tools//go/analysis", + "@org_golang_x_tools//go/loader", + ], +) diff --git a/build/linter/unconvert/analysis.go b/build/linter/unconvert/analysis.go new file mode 100644 index 0000000000000..89da08bccd4aa --- /dev/null +++ b/build/linter/unconvert/analysis.go @@ -0,0 +1,61 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unconvert + +import ( + "fmt" + "go/token" + "go/types" + + unconvertAPI "github.com/golangci/unconvert" + "github.com/pingcap/tidb/build/linter/util" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" +) + +// Name is the name of the analyzer. +const Name = "unconvert" + +// Analyzer is the analyzer struct of unconvert. 
+var Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "Remove unnecessary type conversions", + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + var createdPkgs []*loader.PackageInfo + createdPkgs = append(createdPkgs, util.MakeFakeLoaderPackageInfo(pass)) + allPkgs := map[*types.Package]*loader.PackageInfo{} + for _, pkg := range createdPkgs { + pkg := pkg + allPkgs[pkg.Pkg] = pkg + } + prog := &loader.Program{ + Fset: pass.Fset, + Imported: nil, // not used without .Created in any linter + Created: createdPkgs, // all initial packages + AllPackages: allPkgs, // all initial packages and their dependencies + } + positions := unconvertAPI.Run(prog) + if len(positions) == 0 { + return nil, nil + } + + for _, pos := range positions { + pass.Reportf(token.Pos(pos.Offset), fmt.Sprintf("[%s] Unnecessary conversion", Name)) + } + return nil, nil +} diff --git a/build/linter/util/BUILD.bazel b/build/linter/util/BUILD.bazel index 4ac3fec064d07..f8e81695c03eb 100644 --- a/build/linter/util/BUILD.bazel +++ b/build/linter/util/BUILD.bazel @@ -8,5 +8,6 @@ go_library( deps = [ "@co_honnef_go_tools//analysis/report", "@org_golang_x_tools//go/analysis", + "@org_golang_x_tools//go/loader", ], ) diff --git a/build/linter/util/util.go b/build/linter/util/util.go index d476173a973a0..afa56c515613f 100644 --- a/build/linter/util/util.go +++ b/build/linter/util/util.go @@ -18,10 +18,12 @@ import ( "fmt" "go/ast" "go/token" + "io/ioutil" "reflect" "strings" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" "honnef.co/go/tools/analysis/report" ) @@ -147,3 +149,59 @@ func FormatCode(code string) string { return fmt.Sprintf("`%s`", code) } + +// MakeFakeLoaderPackageInfo creates a fake loader.PackageInfo for a given package. +func MakeFakeLoaderPackageInfo(pass *analysis.Pass) *loader.PackageInfo { + var errs []error + + typeInfo := pass.TypesInfo + + return &loader.PackageInfo{ + Pkg: pass.Pkg, + Importable: true, // not used + TransitivelyErrorFree: true, // not used + + // use compiled (preprocessed) go files AST; + // AST linters use not preprocessed go files AST + Files: pass.Files, + Errors: errs, + Info: *typeInfo, + } +} + +// ReadFile reads a file and adds it to the FileSet +// so that we can report errors against it using lineStart. +func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) { + //nolint: gosec + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, nil, err + } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil +} + +// FindOffset returns the offset of a given position in a file.
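(Aside, not part of the patch: the FindOffset helper declared just below walks the file text counting lines and columns to recover a byte offset, which callers such as the gosec and misspell wrappers then turn into a token.Pos via tf.Base()+offset. A self-contained sketch of that round trip; findOffset and the demo source are illustrative stand-ins, not code from this change:)

```go
package main

import (
	"fmt"
	"go/token"
)

// findOffset performs the same walk as util.FindOffset: scan the text,
// tracking the current 1-based line and column, and return the byte offset
// of the requested position (or -1 if it is not found).
func findOffset(text string, line, col int) int {
	curLine, curCol := 1, 1
	for off, ch := range text {
		if curLine == line && curCol == col {
			return off
		}
		if ch == '\n' {
			curLine++
			curCol = 1
		} else {
			curCol++
		}
	}
	return -1
}

func main() {
	src := "package main\n\nfunc main() {}\n"

	// Register the content with a token.FileSet, as util.ReadFile does,
	// so offsets can be converted back into positions.
	fset := token.NewFileSet()
	tf := fset.AddFile("demo.go", -1, len(src))
	tf.SetLinesForContent([]byte(src))

	off := findOffset(src, 3, 6) // line 3, column 6 -> the 'm' of "main"
	pos := token.Pos(tf.Base() + off)
	fmt.Println(off, fset.Position(pos)) // 19 demo.go:3:6
}
```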
+func FindOffset(fileText string, line, column int) int { + // we count our current line and column position + currentCol := 1 + currentLine := 1 + + for offset, ch := range fileText { + // see if we found where we wanted to go to + if currentLine == line && currentCol == column { + return offset + } + + // line break - increment the line counter and reset the column + if ch == '\n' { + currentLine++ + currentCol = 1 + } else { + currentCol++ + } + + } + return -1 //not found +} diff --git a/build/nogo_config.json b/build/nogo_config.json index cefdb5fe4aa11..b766d3ec45a14 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -1,4 +1,11 @@ { + "asciicheck": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "br/pkg/lightning/web/res_vfsdata.go": "ignore code" + } + }, "asmdecl": { "exclude_files": { "/external/": "no need to vet third party code", @@ -74,6 +81,12 @@ ".*_generated\\.go$": "ignore generated code" } }, + "deadcode": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "deepequalerrors": { "exclude_files": { "/external/": "no need to vet third party code", @@ -83,6 +96,7 @@ "durationcheck": { "exclude_files": { "/external/": "no need to vet third party code", + "/rules_go_work-*": "ignore generated code", ".*_generated\\.go$": "ignore generated code" } }, @@ -92,6 +106,20 @@ ".*_generated\\.go$": "ignore generated code" } }, + "errcheck": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + ".*_test\\.go$": "ignore generated code", + "util/logutil": "ignore util/logutil code", + "tools/": "ignore tools code", + "/src/net/conf.go": "ignore code", + "/rules_go_work-*": "ignore generated code", + "GOROOT/": "ignore code", + "/parser/": "ignore code", + ".*_/testmain\\.go$": "ignore code" + } + }, "exportloopref": { "exclude_files": { "/external/": "no need to vet third party code", @@ -114,6 +142,34 @@ ".*failpoint_binding__.go$": "ignore generated code" } }, + "gci": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "/cgo/": "ignore cgo code", + ".*\\.pb\\.go$": "generated code", + "/rules_go_work-*": "ignore generated code", + ".*test_/testmain\\.go$": "ignore generated code", + ".*failpoint_binding__.go$": "ignore generated code", + "util/printer/printer.go": "ignore util/printer code" + } + }, + "gosec": { + "exclude_files": { + "external/": "no need to vet third party code", + "parser/goyacc/": "ignore goyacc code", + ".*_test\\.go$": "ignore generated code", + "/cgo/": "ignore cgo code", + "/rules_go_work-*": "ignore generated code", + "tools/check/ut.go": "ignore tools/check code", + "tools/check/xprog.go": "ignore tools/check code", + "cmd/pluginpkg/pluginpkg.go": "ignore cmd/pluginpkg code", + "tools/check/xprog.go:": "ignore tools/check code", + "cmd/explaintest/main.go": "ignore cmd/explaintest code", + "GOROOT/": "ignore code", + ".*_generated\\.go$": "ignore generated code" + } + }, "httpresponse": { "exclude_files": { "/external/": "no need to vet third party code", @@ -150,6 +206,13 @@ ".*_generated\\.go$": "ignore generated code" } }, + "misspell": { + "exclude_files": { + "/cgo/": "ignore cgo code", + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "nilfunc": { "exclude_files": { "/external/": "no 
need to vet third party code", @@ -210,6 +273,15 @@ ".*_generated\\.go$": "ignore generated code" } }, + "unconvert": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*\\.pb\\.go$": "generated code", + "parser/parser.go": "generated code", + "/cgo/": "no need to vet third party code for cgo", + ".*_generated\\.go$": "ignore generated code" + } + }, "unmarshal": { "exclude_files": { "/external/": "no need to vet third party code", @@ -230,24 +302,60 @@ "parser/digester_test.go": "ignore code" } }, + "S1000": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1001": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1002": { "exclude_files": { "/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1003": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1004": { "exclude_files": { "/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1005": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1006": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1007": { "exclude_files": { "/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1008": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1009": { "exclude_files": { "/external/": "no need to vet third party code", @@ -260,12 +368,54 @@ ".*_generated\\.go$": "ignore generated code" } }, + "S1011": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1012": { "exclude_files": { "/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1013": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1014": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1015": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1016": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1017": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1018": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1019": { "exclude_files": { "/external/": "no need to vet third party code", @@ -286,18 +436,122 @@ "tools/check/ut.go": "ignore code" } }, + "S1022": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1023": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "parser/parser.go": "ignore code" + } + }, "S1024": { "exclude_files": { 
"/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1025": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1026": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1027": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1028": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1029": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, "S1030": { "exclude_files": { "/external/": "no need to vet third party code", ".*_generated\\.go$": "ignore generated code" } }, + "S1031": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1032": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1033": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1034": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1035": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1036": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1037": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1038": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1039": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code" + } + }, + "S1040": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "parser/parser.go": "ignore generated code" + } + }, "SA2000": { "exclude_files": { "/external/": "no need to vet third party code", @@ -431,5 +685,14 @@ "parser/yy_parser.go": "ignore generated code", "/cgo/": "no need to vet third party code for cgo" } + }, + "predeclared": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "parser/yy_parser.go": "ignore generated code", + "parser/parser.go": "ignore generated code", + "/cgo/": "no need to vet third party code for cgo" + } } } diff --git a/build/patches/com_github_kisielk_errcheck.patch b/build/patches/com_github_kisielk_errcheck.patch new file mode 100644 index 0000000000000..c7d6e2de505a1 --- /dev/null +++ b/build/patches/com_github_kisielk_errcheck.patch @@ -0,0 +1,59 @@ +diff -urN a/errcheck/analyzer.go b/errcheck/analyzer.go +--- a/errcheck/analyzer.go ++++ b/errcheck/analyzer.go +@@ -3,9 +3,9 @@ package errcheck + import ( + "fmt" + "go/ast" +- "go/token" + "reflect" + "regexp" ++ "strings" + + "golang.org/x/tools/go/analysis" + ) +@@ -21,6 +22,7 @@ var ( + argBlank bool + argAsserts bool + argExcludeFile string ++ argExcludes string + argExcludeOnly 
bool + ) + +@@ -28,6 +30,7 @@ func init() { + Analyzer.Flags.BoolVar(&argBlank, "blank", false, "if true, check for errors assigned to blank identifier") + Analyzer.Flags.BoolVar(&argAsserts, "assert", false, "if true, check for ignored type assertion results") + Analyzer.Flags.StringVar(&argExcludeFile, "exclude", "", "Path to a file containing a list of functions to exclude from checking") ++ Analyzer.Flags.StringVar(&argExcludes, "excludes", "", "Contents of the exclude file as a string (overrides -exclude)") + Analyzer.Flags.BoolVar(&argExcludeOnly, "excludeonly", false, "Use only excludes from exclude file") + } + +@@ -39,7 +42,14 @@ func runAnalyzer(pass *analysis.Pass) (interface{}, error) { + exclude[name] = true + } + } +- if argExcludeFile != "" { ++ if argExcludes != "" { ++ for _, name := range strings.Split(argExcludes, "\n") { ++ if strings.HasPrefix(name, "//") || name == "" { ++ continue ++ } ++ exclude[name] = true ++ } ++ } else if argExcludeFile != "" { + excludes, err := ReadExcludes(argExcludeFile) + if err != nil { + return nil, fmt.Errorf("Could not read exclude file: %v\n", err) +--- a/errcheck/analyzer.go ++++ b/errcheck/analyzer.go +@@ -65,8 +65,9 @@ func runAnalyzer(pass *analysis.Pass) (interface{}, error) { + ast.Walk(v, f) + + for _, err := range v.errors { ++ fsetFile := pass.Fset.File(f.Pos()) + pass.Report(analysis.Diagnostic{ +- Pos: token.Pos(int(f.Pos()) + err.Pos.Offset), ++ Pos: fsetFile.Pos(err.Pos.Offset), + Message: "unchecked error", + }) + } diff --git a/cmd/benchraw/main.go b/cmd/benchraw/main.go index 3bcce7dec621c..80f1a1d2289bc 100644 --- a/cmd/benchraw/main.go +++ b/cmd/benchraw/main.go @@ -30,7 +30,6 @@ import ( "github.com/pingcap/tidb/parser/terror" "github.com/tikv/client-go/v2/config" "github.com/tikv/client-go/v2/rawkv" - "go.uber.org/zap" ) diff --git a/cmd/explaintest/r/new_character_set_invalid.result b/cmd/explaintest/r/new_character_set_invalid.result index 92e43d6c747fa..bb9ae4aa6db2b 100644 --- a/cmd/explaintest/r/new_character_set_invalid.result +++ b/cmd/explaintest/r/new_character_set_invalid.result @@ -21,3 +21,35 @@ a b c ? ? ? 
中文?中文 asdf?fdsa 字符集?字符集 @@ @@ @@ +set @@sql_mode = default; +drop table t; +create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59)); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `f` set('Ò»','??') DEFAULT NULL, + `e` enum('??','jY') DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +create table t( e enum(0xBAEC,0x6A59)); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `e` enum('??','jY') DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59)) collate gbk_bin; +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `f` set('一','三') COLLATE gbk_bin DEFAULT NULL, + `e` enum('红','jY') COLLATE gbk_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_bin +drop table t; +create table t( e enum(0xBAEC,0x6A59)) collate gbk_bin; +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `e` enum('红','jY') COLLATE gbk_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_bin +set @@sql_mode = ''; diff --git a/cmd/explaintest/r/show.result b/cmd/explaintest/r/show.result new file mode 100644 index 0000000000000..6dfbf77c4af44 --- /dev/null +++ b/cmd/explaintest/r/show.result @@ -0,0 +1,4 @@ +show tables like '%xx'; +Tables_in_test (%xx) +show databases like '%xx'; +Database (%xx) diff --git a/cmd/explaintest/t/new_character_set_invalid.test b/cmd/explaintest/t/new_character_set_invalid.test index eaed9ba78c518..369f72362cfc7 100644 --- a/cmd/explaintest/t/new_character_set_invalid.test +++ b/cmd/explaintest/t/new_character_set_invalid.test @@ -15,3 +15,18 @@ insert into t values ('À', 'ø', '😂'); insert into t values ('中文À中文', 'asdføfdsa', '字符集😂字符集'); insert into t values (0x4040ffff, 0x4040ffff, 0x4040ffff); select * from t; + +set @@sql_mode = default; +drop table t; +create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59)); +show create table t; +drop table t; +create table t( e enum(0xBAEC,0x6A59)); +show create table t; +drop table t; +create table t(f set(0xD2BB, 0xC8FD), e enum(0xBAEC,0x6A59)) collate gbk_bin; +show create table t; +drop table t; +create table t( e enum(0xBAEC,0x6A59)) collate gbk_bin; +show create table t; +set @@sql_mode = ''; diff --git a/cmd/explaintest/t/show.test b/cmd/explaintest/t/show.test new file mode 100644 index 0000000000000..b90131d18f861 --- /dev/null +++ b/cmd/explaintest/t/show.test @@ -0,0 +1,3 @@ +# test show output field name +show tables like '%xx'; +show databases like '%xx'; diff --git a/config/const.go b/config/const.go index 42c314cd64cef..9196e1b9929d8 100644 --- a/config/const.go +++ b/config/const.go @@ -16,6 +16,3 @@ package config // DefRowsForSampleRate is default sample rows used to calculate samplerate. const DefRowsForSampleRate = 110000 - -// TrackMemWhenExceeds is the threshold when memory usage needs to be tracked. 
-const TrackMemWhenExceeds = 104857600 // 100MB diff --git a/ddl/BUILD.bazel b/ddl/BUILD.bazel index 807f65715202b..3327bf550acb9 100644 --- a/ddl/BUILD.bazel +++ b/ddl/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "ddl_tiflash_api.go", "ddl_worker.go", "delete_range.go", + "delete_range_util.go", "foreign_key.go", "generated_column.go", "index.go", @@ -105,6 +106,7 @@ go_library( "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", "@io_etcd_go_etcd_client_v3//:client", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], @@ -112,6 +114,7 @@ go_library( go_test( name = "ddl_test", + timeout = "short", srcs = [ "attributes_sql_test.go", "callback_test.go", diff --git a/ddl/backfilling.go b/ddl/backfilling.go index ab39a1cff1b65..0b4ff4b6d554e 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -607,6 +607,7 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba defer func() { closeBackfillWorkers(backfillWorkers) }() + jc := w.jobContext(job) for { kvRanges, err := splitTableRanges(t, reorgInfo.d.store, startKey, endKey) @@ -647,19 +648,19 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba switch bfWorkerType { case typeAddIndexWorker: - idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo) + idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) case typeUpdateColumnWorker: // Setting InCreateOrAlterStmt tells the difference between SELECT casting and ALTER COLUMN casting. sessCtx.GetSessionVars().StmtCtx.InCreateOrAlterStmt = true - updateWorker := newUpdateColumnWorker(sessCtx, i, t, oldColInfo, colInfo, decodeColMap, reorgInfo) + updateWorker := newUpdateColumnWorker(sessCtx, i, t, oldColInfo, colInfo, decodeColMap, reorgInfo, jc) updateWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, updateWorker.backfillWorker) go updateWorker.backfillWorker.run(reorgInfo.d, updateWorker, job) case typeCleanUpIndexWorker: - idxWorker := newCleanUpIndexWorker(sessCtx, w, i, t, decodeColMap, reorgInfo) + idxWorker := newCleanUpIndexWorker(sessCtx, w, i, t, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) @@ -733,6 +734,8 @@ func iterateSnapshotRows(ctx *JobContext, store kv.Storage, priority int, t tabl ver := kv.Version{Ver: version} snap := store.GetSnapshot(ver) snap.SetOption(kv.Priority, priority) + snap.SetOption(kv.RequestSourceInternal, true) + snap.SetOption(kv.RequestSourceType, ctx.ddlJobSourceType()) if tagger := ctx.getResourceGroupTaggerForTopSQL(); tagger != nil { snap.SetOption(kv.ResourceGroupTagger, tagger) } @@ -778,6 +781,8 @@ func getRangeEndKey(ctx *JobContext, store kv.Storage, priority int, t table.Tab if tagger := ctx.getResourceGroupTaggerForTopSQL(); tagger != nil { snap.SetOption(kv.ResourceGroupTagger, tagger) } + snap.SetOption(kv.RequestSourceInternal, true) + snap.SetOption(kv.RequestSourceType, ctx.ddlJobSourceType()) it, err := snap.IterReverse(endKey.Next()) if err != nil { return nil, errors.Trace(err) diff --git a/ddl/cancel_test.go b/ddl/cancel_test.go index 9c53952534bb3..64179c19a8eca 100644 --- a/ddl/cancel_test.go +++ b/ddl/cancel_test.go @@ 
-176,21 +176,19 @@ var allTestCase = []testCancelJob{ {"alter table t add column c41 bigint, add column c42 bigint", true, subStates{model.StateWriteReorganization, model.StateNone}, true, true, nil}, {"alter table t add column c41 bigint, add column c42 bigint", false, subStates{model.StatePublic, model.StatePublic}, false, true, nil}, // Drop columns. - // TODO: fix schema state. - {"alter table t drop column c41, drop column c42", true, model.StateNone, true, false, nil}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteOnly, true, false, nil}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteOnly, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, - {"alter table t drop column c41, drop column c42", false, model.StateWriteOnly, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteReorganization, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, - {"alter table t drop column c41, drop column c42", false, model.StatePublic, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, + {"alter table t drop column c41, drop column c42", true, subStates{model.StatePublic, model.StatePublic}, true, false, nil}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteOnly, model.StateDeleteOnly}, true, false, nil}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteOnly, model.StateDeleteOnly}, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateWriteOnly, model.StateDeleteOnly}, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteReorganization, model.StateDeleteOnly}, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateNone, model.StateDeleteOnly}, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint"}}, // Drop columns with index. - // TODO: fix schema state. 
- {"alter table t drop column c41, drop column c42", true, model.StateNone, true, false, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteOnly, true, false, nil}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteOnly, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, - {"alter table t drop column c41, drop column c42", false, model.StateWriteOnly, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, - {"alter table t drop column c41, drop column c42", false, model.StateDeleteReorganization, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, - {"alter table t drop column c41, drop column c42", false, model.StatePublic, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, + {"alter table t drop column c41, drop column c42", true, subStates{model.StatePublic, model.StatePublic}, true, false, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteOnly, model.StateDeleteOnly}, true, false, nil}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteOnly, model.StateDeleteOnly}, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateWriteOnly, model.StateDeleteOnly}, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateDeleteReorganization, model.StateDeleteOnly}, true, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, + {"alter table t drop column c41, drop column c42", false, subStates{model.StateNone, model.StateDeleteOnly}, false, true, []string{"alter table t add column c41 bigint, add column c42 bigint", "alter table t add index drop_columns_idx(c41)"}}, // Alter index visibility. {"alter table t alter index idx_v invisible", true, model.StateNone, true, false, []string{"alter table t add index idx_v(c1)"}}, {"alter table t alter index idx_v invisible", false, model.StatePublic, false, true, nil}, @@ -208,14 +206,13 @@ var allTestCase = []testCancelJob{ {"alter table t_partition drop partition p6", false, model.StateDeleteReorganization, true, true, []string{"alter table t_partition add partition (partition p6 values less than (8192))"}}, {"alter table t_partition drop partition p6", false, model.StateNone, true, true, []string{"alter table t_partition add partition (partition p6 values less than (8192))"}}, // Drop indexes. - // TODO: fix schema state. 
- {"alter table t drop index mul_idx1, drop index mul_idx2", true, model.StateNone, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateWriteOnly, true, false, nil}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateWriteOnly, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteOnly, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteOnly, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteReorganization, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteReorganization, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", true, subStates{model.StatePublic, model.StatePublic}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateWriteOnly, model.StateWriteOnly}, true, false, nil}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateWriteOnly, model.StateWriteOnly}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteOnly, model.StateWriteOnly}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteOnly, model.StateWriteOnly}, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteReorganization, model.StateWriteOnly}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteReorganization, model.StateWriteOnly}, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, // Alter db placement. 
{"alter database db_placement placement policy = 'alter_x'", true, model.StateNone, true, false, []string{"create placement policy alter_x PRIMARY_REGION=\"cn-east-1\", REGIONS=\"cn-east-1\";", "create database db_placement"}}, {"alter database db_placement placement policy = 'alter_x'", false, model.StatePublic, false, true, nil}, @@ -306,7 +303,6 @@ func TestCancel(t *testing.T) { for _, prepareSQL := range tc.prepareSQL { tk.MustExec(prepareSQL) } - cancel = false cancelWhenReorgNotStart = true registHook(hook, true) diff --git a/ddl/column.go b/ddl/column.go index 6b9a00ea5b3db..29b16c17140fd 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -51,38 +51,6 @@ import ( "go.uber.org/zap" ) -// adjustColumnInfoInDropColumn is used to set the correct position of column info when dropping column. -// 1. The offset of column should to be set to the last of the columns. -// 2. The dropped column is moved to the end of tblInfo.Columns, due to it was not public any more. -func adjustColumnInfoInDropColumn(tblInfo *model.TableInfo, offset int) { - oldCols := tblInfo.Columns - // Adjust column offset. - offsetChanged := make(map[int]int, len(oldCols)-offset-1) - for i := offset + 1; i < len(oldCols); i++ { - offsetChanged[oldCols[i].Offset] = i - 1 - oldCols[i].Offset = i - 1 - } - oldCols[offset].Offset = len(oldCols) - 1 - // For expression index, we drop hidden columns and index simultaneously. - // So we need to change the offset of expression index. - offsetChanged[offset] = len(oldCols) - 1 - // Update index column offset info. - // TODO: There may be some corner cases for index column offsets, we may check this later. - for _, idx := range tblInfo.Indices { - for _, col := range idx.Columns { - newOffset, ok := offsetChanged[col.Offset] - if ok { - col.Offset = newOffset - } - } - } - newCols := make([]*model.ColumnInfo, 0, len(oldCols)) - newCols = append(newCols, oldCols[:offset]...) - newCols = append(newCols, oldCols[offset+1:]...) - newCols = append(newCols, oldCols[offset]) - tblInfo.Columns = newCols -} - func createColumnInfoWithPosCheck(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, *ast.ColumnPosition, int, error) { // Check column name duplicate. cols := tblInfo.Columns @@ -250,56 +218,6 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } -func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) { - schemaID := job.SchemaID - tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, nil, nil, nil, nil, errors.Trace(err) - } - columns := []*model.ColumnInfo{} - positions := []*ast.ColumnPosition{} - offsets := []int{} - ifNotExists := []bool{} - err = job.DecodeArgs(&columns, &positions, &offsets, &ifNotExists) - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, nil, nil, nil, nil, errors.Trace(err) - } - - columnInfos := make([]*model.ColumnInfo, 0, len(columns)) - newColumns := make([]*model.ColumnInfo, 0, len(columns)) - newPositions := make([]*ast.ColumnPosition, 0, len(columns)) - newOffsets := make([]int, 0, len(columns)) - newIfNotExists := make([]bool, 0, len(columns)) - for i, col := range columns { - columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) - if columnInfo != nil { - if columnInfo.State == model.StatePublic { - // We already have a column with the same column name. 
- if ifNotExists[i] { - // TODO: Should return a warning. - logutil.BgLogger().Warn("[ddl] check add columns, duplicate column", zap.Stringer("col", col.Name)) - continue - } - job.State = model.JobStateCancelled - return nil, nil, nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) - } - columnInfos = append(columnInfos, columnInfo) - } - newColumns = append(newColumns, columns[i]) - newPositions = append(newPositions, positions[i]) - newOffsets = append(newOffsets, offsets[i]) - newIfNotExists = append(newIfNotExists, ifNotExists[i]) - } - return tblInfo, columnInfos, newColumns, newPositions, newOffsets, newIfNotExists, nil -} - -func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { - for i := range columnInfos { - columnInfos[i].State = state - } -} - // checkAfterPositionExists makes sure the column specified in AFTER clause is exists. // For example, ALTER TABLE t ADD COLUMN c3 INT AFTER c1. func checkAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error { @@ -318,241 +236,7 @@ func setIndicesState(indexInfos []*model.IndexInfo, state model.SchemaState) { } } -func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { - // Handle the rolling back job. - if job.IsRollingback() { - ver, err = onDropColumns(d, t, job) - if err != nil { - return ver, errors.Trace(err) - } - return ver, nil - } - - failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) { - if val.(bool) { - failpoint.Return(ver, errors.New("occur an error before decode args")) - } - }) - - tblInfo, columnInfos, columns, positions, offsets, ifNotExists, err := checkAddColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if len(columnInfos) == 0 { - if len(columns) == 0 { - job.State = model.JobStateCancelled - return ver, nil - } - for i := range columns { - columnInfo, pos, offset, err := createColumnInfoWithPosCheck(tblInfo, columns[i], positions[i]) - if err != nil { - job.State = model.JobStateCancelled - return ver, errors.Trace(err) - } - logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) - positions[i] = pos - offsets[i] = offset - if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { - job.State = model.JobStateCancelled - return ver, errors.Trace(err) - } - columnInfos = append(columnInfos, columnInfo) - } - // Set arg to job. 
- job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists} - } - - originalState := columnInfos[0].State - switch columnInfos[0].State { - case model.StateNone: - // none -> delete only - setColumnsState(columnInfos, model.StateDeleteOnly) - ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteOnly - case model.StateDeleteOnly: - // delete only -> write only - setColumnsState(columnInfos, model.StateWriteOnly) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateWriteOnly - case model.StateWriteOnly: - // write only -> reorganization - setColumnsState(columnInfos, model.StateWriteReorganization) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateWriteReorganization - case model.StateWriteReorganization: - // reorganization -> public - // Adjust table column offsets. - oldCols := tblInfo.Columns[:len(tblInfo.Columns)-len(offsets)] - newCols := tblInfo.Columns[len(tblInfo.Columns)-len(offsets):] - tblInfo.Columns = oldCols - for i := range offsets { - // For multiple columns with after position, should adjust offsets. - // e.g. create table t(a int); - // alter table t add column b int after a, add column c int after a; - // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; - // alter table t add column a1 int after a, add column b1 int first; - if positions[i].Tp == ast.ColumnPositionAfter { - for j := 0; j < i; j++ { - if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst { - offsets[i]++ - } - } - } - tblInfo.Columns = append(tblInfo.Columns, newCols[i]) - tblInfo.MoveColumnInfo(len(tblInfo.Columns)-1, offsets[i]) - } - setColumnsState(columnInfos, model.StatePublic) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - // Finish this job. 
- job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) - asyncNotifyEvent(d, &ddlutil.Event{Tp: model.ActionAddColumns, TableInfo: tblInfo, ColumnInfos: columnInfos}) - default: - err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("column", columnInfos[0].State) - } - - return ver, errors.Trace(err) -} - -func onDropColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, colInfos, delCount, idxInfos, err := checkDropColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if len(colInfos) == 0 { - job.State = model.JobStateCancelled - return ver, nil - } - - originalState := colInfos[0].State - switch colInfos[0].State { - case model.StatePublic: - // public -> write only - setColumnsState(colInfos, model.StateWriteOnly) - setIndicesState(idxInfos, model.StateWriteOnly) - for _, colInfo := range colInfos { - err = checkDropColumnForStatePublic(tblInfo, colInfo) - if err != nil { - return ver, errors.Trace(err) - } - } - ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != colInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateWriteOnly - case model.StateWriteOnly: - // write only -> delete only - setColumnsState(colInfos, model.StateDeleteOnly) - if len(idxInfos) > 0 { - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if !indexInfoContains(idx.ID, idxInfos) { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices - } - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.Args = append(job.Args, indexInfosToIDList(idxInfos)) - job.SchemaState = model.StateDeleteOnly - case model.StateDeleteOnly: - // delete only -> reorganization - setColumnsState(colInfos, model.StateDeleteReorganization) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteReorganization - case model.StateDeleteReorganization: - // reorganization -> absent - // All reorganization jobs are done, drop this column. - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-delCount] - setColumnsState(colInfos, model.StateNone) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - - // Finish this job. - if job.IsRollingback() { - job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) - } else { - job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) - job.Args = append(job.Args, getPartitionIDs(tblInfo)) - } - default: - err = dbterror.ErrInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State) - } - return ver, errors.Trace(err) -} - -func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, []*model.IndexInfo, error) { - schemaID := job.SchemaID - tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, 0, nil, errors.Trace(err) - } - - var colNames []model.CIStr - var ifExists []bool - // indexIds is used to make sure we don't truncate args when decoding the rawArgs. 
- var indexIds []int64 - err = job.DecodeArgs(&colNames, &ifExists, &indexIds) - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, 0, nil, errors.Trace(err) - } - - newColNames := make([]model.CIStr, 0, len(colNames)) - colInfos := make([]*model.ColumnInfo, 0, len(colNames)) - newIfExists := make([]bool, 0, len(colNames)) - indexInfos := make([]*model.IndexInfo, 0) - for i, colName := range colNames { - colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) - if colInfo == nil || colInfo.Hidden { - if ifExists[i] { - // TODO: Should return a warning. - logutil.BgLogger().Warn(fmt.Sprintf("column %s doesn't exist", colName)) - continue - } - job.State = model.JobStateCancelled - return nil, nil, 0, nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) - } - if err = isDroppableColumn(job.MultiSchemaInfo != nil, tblInfo, colName); err != nil { - job.State = model.JobStateCancelled - return nil, nil, 0, nil, errors.Trace(err) - } - newColNames = append(newColNames, colName) - newIfExists = append(newIfExists, ifExists[i]) - colInfos = append(colInfos, colInfo) - idxInfos := listIndicesWithColumn(colName.L, tblInfo.Indices) - indexInfos = append(indexInfos, idxInfos...) - } - job.Args = []interface{}{newColNames, newIfExists} - if len(indexIds) > 0 { - job.Args = append(job.Args, indexIds) - } - return tblInfo, colInfos, len(colInfos), indexInfos, nil -} - func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. // And we need consider the column without not-null flag. @@ -573,10 +257,22 @@ func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.Colu } func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, colInfo, idxInfos, err := checkDropColumn(t, job) + tblInfo, colInfo, idxInfos, ifExists, err := checkDropColumn(t, job) if err != nil { + if ifExists && dbterror.ErrCantDropFieldOrKey.Equal(err) { + // Convert the "not exists" error to a warning. + job.Warning = toTError(err) + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + return ver, nil + } return ver, errors.Trace(err) } + if job.MultiSchemaInfo != nil && !job.IsRollingback() && job.MultiSchemaInfo.Revertible { + job.MarkNonRevertible() + job.SchemaState = colInfo.State + // Store the mark and enter the next DDL handling loop. + return updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, false) + } originalState := colInfo.State switch colInfo.State { @@ -584,6 +280,7 @@ func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) // public -> write only colInfo.State = model.StateWriteOnly setIndicesState(idxInfos, model.StateWriteOnly) + tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1) err = checkDropColumnForStatePublic(tblInfo, colInfo) if err != nil { return ver, errors.Trace(err) @@ -619,6 +316,7 @@ func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) case model.StateDeleteReorganization: // reorganization -> absent // All reorganization jobs are done, drop this column. 
+ tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1) tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-1] colInfo.State = model.StateNone ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != colInfo.State) @@ -641,33 +339,34 @@ func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) return ver, errors.Trace(err) } -func checkDropColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, []*model.IndexInfo, error) { +func checkDropColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, []*model.IndexInfo, bool /* ifExists */, error) { schemaID := job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, false, errors.Trace(err) } var colName model.CIStr + var ifExists bool // indexIds is used to make sure we don't truncate args when decoding the rawArgs. var indexIds []int64 - err = job.DecodeArgs(&colName, &indexIds) + err = job.DecodeArgs(&colName, &ifExists, &indexIds) if err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, false, errors.Trace(err) } colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) if colInfo == nil || colInfo.Hidden { job.State = model.JobStateCancelled - return nil, nil, nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + return nil, nil, nil, ifExists, dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) } if err = isDroppableColumn(job.MultiSchemaInfo != nil, tblInfo, colName); err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, false, errors.Trace(err) } idxInfos := listIndicesWithColumn(colName.L, tblInfo.Indices) - return tblInfo, colInfo, idxInfos, nil + return tblInfo, colInfo, idxInfos, false, nil } func onSetDefaultValue(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { @@ -976,14 +675,15 @@ func (w *worker) doModifyColumnTypeWithData( updateChangingObjState(changingCol, changingIdxs, model.StateDeleteOnly) failpoint.Inject("mockInsertValueAfterCheckNull", func(val failpoint.Value) { if valStr, ok := val.(string); ok { - var ctx sessionctx.Context - ctx, err := w.sessPool.get() + var sctx sessionctx.Context + sctx, err := w.sessPool.get() if err != nil { failpoint.Return(ver, err) } - defer w.sessPool.put(ctx) + defer w.sessPool.put(sctx) - _, _, err = ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.Background(), nil, valStr) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, _, err = sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, valStr) if err != nil { job.State = model.JobStateCancelled failpoint.Return(ver, err) @@ -1348,10 +1048,11 @@ type updateColumnWorker struct { rowMap map[int64]types.Datum // For SQL Mode and warnings. 
- sqlMode mysql.SQLMode + sqlMode mysql.SQLMode + jobContext *JobContext } -func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *updateColumnWorker { +func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, oldCol, newCol *model.ColumnInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *updateColumnWorker { rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &updateColumnWorker{ backfillWorker: newBackfillWorker(sessCtx, id, t, reorgInfo), @@ -1361,6 +1062,7 @@ func newUpdateColumnWorker(sessCtx sessionctx.Context, id int, t table.PhysicalT rowDecoder: rowDecoder, rowMap: make(map[int64]types.Datum, len(decodeColMap)), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, } } @@ -1526,7 +1228,8 @@ func (w *updateColumnWorker) cleanRowMap() { // BackfillDataInTxn will backfill the table record in a transaction. A lock corresponds to a rowKey if the value of rowKey is changed. func (w *updateColumnWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (taskCtx backfillTaskContext, errInTxn error) { oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 txn.SetOption(kv.Priority, w.priority) @@ -1774,7 +1477,18 @@ func updateColumnDefaultValue(d *ddlCtx, t *meta.Meta, job *model.Job, newCol *m // The newCol's offset may be the value of the old schema version, so we can't use newCol directly. 
oldCol.DefaultValue = newCol.DefaultValue oldCol.DefaultValueBit = newCol.DefaultValueBit - oldCol.SetFlag(newCol.GetFlag()) + oldCol.DefaultIsExpr = newCol.DefaultIsExpr + if mysql.HasNoDefaultValueFlag(newCol.GetFlag()) { + oldCol.AddFlag(mysql.NoDefaultValueFlag) + } else { + oldCol.DelFlag(mysql.NoDefaultValueFlag) + sctx := newContext(d.store) + err = checkDefaultValue(sctx, table.ToColumn(oldCol), true) + if err != nil { + job.State = model.JobStateCancelled + return ver, err + } + } ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, true) if err != nil { diff --git a/ddl/column_change_test.go b/ddl/column_change_test.go index a6de4bc964d2f..0ed158cd740a0 100644 --- a/ddl/column_change_test.go +++ b/ddl/column_change_test.go @@ -161,7 +161,8 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { tID = job.TableID if atomic.LoadInt32(&errCount) > 0 && job.Type == model.ActionModifyColumn { atomic.AddInt32(&errCount, -1) - genAutoRandErr = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBackfillDDLPrefix+ddl.DDLBackfillers[model.ActionModifyColumn]) + genAutoRandErr = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) _, err1 := t.GetAutoIDAccessors(dbID, tID).RandomID().Inc(1) return err1 @@ -176,7 +177,8 @@ func TestModifyAutoRandColumnWithMetaKeyChanged(t *testing.T) { const newAutoRandomBits uint64 = 10 testCheckJobDone(t, store, jobID, true) var newTbInfo *model.TableInfo - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err error newTbInfo, err = t.GetTable(dbID, tID) diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index 2863aa2685f5a..5efbd6dc3b688 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -2092,3 +2092,60 @@ func TestParallelRenameTable(t *testing.T) { require.True(t, strings.Contains(checkErr.Error(), "Table 'test.t' doesn't exist"), checkErr.Error()) tk.MustExec("rename table tt to t") } + +func TestConcurrentSetDefaultValue(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a YEAR NULL DEFAULT '2029')") + + tk1 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + + setdefaultSQL := []string{ + "alter table t alter a SET DEFAULT '2098'", + "alter table t alter a SET DEFAULT '1'", + } + setdefaultSQLOffset := 0 + + var wg sync.WaitGroup + d := dom.DDL() + originalCallback := d.GetHook() + defer d.SetHook(originalCallback) + callback := &ddl.TestDDLCallback{Do: dom} + skip := false + callback.OnJobRunBeforeExported = func(job *model.Job) { + switch job.SchemaState { + case model.StateDeleteOnly: + if skip { + break + } + skip = true + wg.Add(1) + go func() { + _, err := tk1.Exec(setdefaultSQL[setdefaultSQLOffset]) + if setdefaultSQLOffset == 0 { + require.Nil(t, err) + } + wg.Done() + }() + } + } + + d.SetHook(callback) + tk.MustExec("alter table t modify column a MEDIUMINT NULL DEFAULT '-8145111'") + + wg.Wait() + tk.MustQuery("select column_type from information_schema.columns where table_name = 't' and table_schema = 
'test';").Check(testkit.Rows("mediumint(9)")) + + tk.MustExec("drop table t") + tk.MustExec("create table t(a int default 2)") + skip = false + setdefaultSQLOffset = 1 + tk.MustExec("alter table t modify column a TIMESTAMP NULL DEFAULT '2017-08-06 10:47:11'") + wg.Wait() + tk.MustExec("show create table t") + tk.MustExec("insert into t value()") +} diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index d44f69c29c994..a0a9f57106181 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -450,8 +450,8 @@ func TestIssue5092(t *testing.T) { tk.MustExec("create table t_issue_5092 (a int)") tk.MustExec("alter table t_issue_5092 add column (b int, c int)") tk.MustGetErrCode("alter table t_issue_5092 drop column if exists a, drop column b, drop column c", errno.ErrCantRemoveAllFields) - tk.MustGetErrCode("alter table t_issue_5092 drop column if exists c, drop column c", errno.ErrCantDropFieldOrKey) - tk.MustExec("alter table t_issue_5092 drop column c, drop column if exists c") + tk.MustGetErrCode("alter table t_issue_5092 drop column if exists c, drop column c", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t_issue_5092 drop column c, drop column if exists c", errno.ErrUnsupportedDDLOperation) tk.MustExec("drop table t_issue_5092") } @@ -593,9 +593,9 @@ func TestErrnoErrorCode(t *testing.T) { sql = "alter table test_drop_columns drop column c1, drop column c2, drop column c3;" tk.MustGetErrCode(sql, errno.ErrCantRemoveAllFields) sql = "alter table test_drop_columns drop column c1, add column c2 int;" - tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode(sql, errno.ErrDupFieldName) sql = "alter table test_drop_columns drop column c1, drop column c1;" - tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) + tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation) // add index sql = "alter table test_error_code_succ add index idx (c_not_exist)" tk.MustGetErrCode(sql, errno.ErrKeyColumnDoesNotExits) diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index 4c3bdfb66faf9..41a3ddc3f3a7d 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -2391,7 +2391,8 @@ func checkPartitionDelRangeDone(t *testing.T, tk *testkit.TestKit, store kv.Stor } hasOldPartitionData := true - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { it, err := txn.Iter(partitionPrefix, nil) if err != nil { return err diff --git a/ddl/ddl.go b/ddl/ddl.go index 0ea01e1222eb5..cd8e5c4d4806e 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -23,7 +23,6 @@ import ( "encoding/json" "flag" "fmt" - "sort" "sync" "time" @@ -49,7 +48,6 @@ import ( "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/table" pumpcli "github.com/pingcap/tidb/tidb-binlog/pump_client" - goutil "github.com/pingcap/tidb/util" tidbutil "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/gcutil" @@ -58,6 +56,7 @@ import ( clientv3 "go.etcd.io/etcd/client/v3" atomicutil "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -106,23 +105,21 @@ type DDL interface { DropSchema(ctx sessionctx.Context, stmt *ast.DropDatabaseStmt) error CreateTable(ctx sessionctx.Context, stmt *ast.CreateTableStmt) error CreateView(ctx sessionctx.Context, stmt 
*ast.CreateViewStmt) error - DropTable(ctx sessionctx.Context, tableIdent ast.Ident) (err error) + DropTable(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error) RecoverTable(ctx sessionctx.Context, recoverInfo *RecoverInfo) (err error) - DropView(ctx sessionctx.Context, tableIdent ast.Ident) (err error) - CreateIndex(ctx sessionctx.Context, tableIdent ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, - columnNames []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error - DropIndex(ctx sessionctx.Context, tableIdent ast.Ident, indexName model.CIStr, ifExists bool) error - AlterTable(ctx context.Context, sctx sessionctx.Context, tableIdent ast.Ident, spec []*ast.AlterTableSpec) error + DropView(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error) + CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error + DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error + AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) error TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error - RenameTable(ctx sessionctx.Context, oldTableIdent, newTableIdent ast.Ident, isAlterTable bool) error - RenameTables(ctx sessionctx.Context, oldTableIdent, newTableIdent []ast.Ident, isAlterTable bool) error + RenameTable(ctx sessionctx.Context, stmt *ast.RenameTableStmt) error LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error RepairTable(ctx sessionctx.Context, table *ast.TableName, createStmt *ast.CreateTableStmt) error CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSequenceStmt) error - DropSequence(ctx sessionctx.Context, tableIdent ast.Ident, ifExists bool) (err error) + DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error) AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) error CreatePlacementPolicy(ctx sessionctx.Context, stmt *ast.CreatePlacementPolicyStmt) error DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacementPolicyStmt) error @@ -271,6 +268,17 @@ func (dc *ddlCtx) setDDLLabelForTopSQL(job *model.Job) { ctx.setDDLLabelForTopSQL(job) } +func (dc *ddlCtx) setDDLSourceForDiagnosis(job *model.Job) { + dc.jobCtx.Lock() + defer dc.jobCtx.Unlock() + ctx, exists := dc.jobCtx.jobCtxMap[job.ID] + if !exists { + ctx = NewJobContext() + dc.jobCtx.jobCtxMap[job.ID] = ctx + } + ctx.setDDLLabelForDiagnosis(job) +} + func (dc *ddlCtx) getResourceGroupTaggerForTopSQL(job *model.Job) tikvrpc.ResourceGroupTagger { dc.jobCtx.Lock() defer dc.jobCtx.Unlock() @@ -433,6 +441,7 @@ func newDDL(ctx context.Context, options ...Option) *ddl { ddlCtx.jobCtx.jobCtxMap = make(map[int64]*JobContext) ddlCtx.mu.hook = opt.Hook ddlCtx.mu.interceptor = &BaseInterceptor{} + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) ddlCtx.ctx, ddlCtx.cancel = context.WithCancel(ctx) d := &ddl{ ddlCtx: ddlCtx, @@ -526,7 +535,8 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { // GetNextDDLSeqNum return the next ddl seq num. 
func (d *ddl) GetNextDDLSeqNum() (uint64, error) { var count uint64 - err := kv.RunInNewTxn(d.ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(d.ctx, kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err error count, err = t.GetHistoryDDLCount() @@ -582,7 +592,8 @@ func (d *ddl) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.In func (d *ddl) genGlobalIDs(count int) ([]int64, error) { var ret []int64 - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { failpoint.Inject("mockGenGlobalIDFail", func(val failpoint.Value) { if val.(bool) { failpoint.Return(errors.New("gofail genGlobalIDs error")) @@ -600,7 +611,8 @@ func (d *ddl) genGlobalIDs(count int) ([]int64, error) { func (d *ddl) genPlacementPolicyID() (int64, error) { var ret int64 - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err error ret, err = m.GenPlacementPolicyID() @@ -722,6 +734,19 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { setDDLJobQuery(ctx, job) task := &limitJobTask{job, make(chan error)} d.limitJobCh <- task + + failpoint.Inject("mockParallelSameDDLJobTwice", func(val failpoint.Value) { + if val.(bool) { + // The same job will be put to the DDL queue twice. + job = job.Clone() + task1 := &limitJobTask{job, make(chan error)} + d.limitJobCh <- task1 + <-task.err + // The second job result is used for test. + task = task1 + } + }) + // worker should restart to continue handling tasks in limitJobCh, and send back through task.err err := <-task.err if err != nil { @@ -737,6 +762,11 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { var historyJob *model.Job jobID := job.ID + + // Attach the context of the jobId to the calling session so that + // KILL can cancel this DDL job. + ctx.GetSessionVars().StmtCtx.DDLJobID = jobID + // For a job from start to end, the state of it will be none -> delete only -> write only -> reorganization -> public // For every state changes, we will wait as lease 2 * lease time, so here the ticker check is 10 * lease. // But we use etcd to speed up, normally it takes less than 0.5s now, so we use 0.5s or 1s or 3s as the max value. @@ -802,16 +832,6 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { logutil.BgLogger().Info("[ddl] DDL job is failed", zap.Int64("jobID", jobID)) return errors.Trace(historyJob.Error) } - // Only for JobStateCancelled job which is adding columns or drop columns or drop indexes. 
- if historyJob.IsCancelled() && (historyJob.Type == model.ActionAddColumns || historyJob.Type == model.ActionDropColumns || historyJob.Type == model.ActionDropIndexes) { - if historyJob.MultiSchemaInfo != nil && len(historyJob.MultiSchemaInfo.Warnings) != 0 { - for _, warning := range historyJob.MultiSchemaInfo.Warnings { - ctx.GetSessionVars().StmtCtx.AppendWarning(warning) - } - } - logutil.BgLogger().Info("[ddl] DDL job is cancelled", zap.Int64("jobID", jobID)) - return nil - } panic("When the state is JobStateRollbackDone or JobStateCancelled, historyJob.Error should never be nil") } } @@ -851,7 +871,7 @@ func (d *ddl) SetHook(h Callback) { func (d *ddl) startCleanDeadTableLock() { defer func() { - goutil.Recover(metrics.LabelDDL, "startCleanDeadTableLock", nil, false) + tidbutil.Recover(metrics.LabelDDL, "startCleanDeadTableLock", nil, false) d.wg.Done() }() @@ -956,10 +976,25 @@ type Info struct { Jobs []*model.Job // It's the currently running jobs. } +// GetDDLInfoWithNewTxn returns DDL information using a new txn. +func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { + err := sessiontxn.NewTxn(context.Background(), s) + if err != nil { + return nil, err + } + info, err := GetDDLInfo(s) + s.RollbackTxn(context.Background()) + return info, err +} + // GetDDLInfo returns DDL information. -func GetDDLInfo(txn kv.Transaction) (*Info, error) { +func GetDDLInfo(s sessionctx.Context) (*Info, error) { var err error info := &Info{} + txn, err := s.Txn(true) + if err != nil { + return nil, err + } t := meta.NewMeta(txn) info.Jobs = make([]*model.Job, 0, 2) @@ -1090,39 +1125,28 @@ func GetAllDDLJobs(t *meta.Meta) ([]*model.Job, error) { return nil, errors.Trace(err) } jobs := append(generalJobs, addIdxJobs...) - sort.Sort(jobArray(jobs)) + slices.SortFunc(jobs, func(i, j *model.Job) bool { + return i.ID < j.ID + }) return jobs, nil } -type jobArray []*model.Job - -func (v jobArray) Len() int { - return len(v) -} - -func (v jobArray) Less(i, j int) bool { - return v[i].ID < v[j].ID -} - -func (v jobArray) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} - // MaxHistoryJobs is exported for testing. const MaxHistoryJobs = 10 // DefNumHistoryJobs is default value of the default number of history job const DefNumHistoryJobs = 10 -// GetHistoryDDLJobs returns the DDL history jobs and an error. +const batchNumHistoryJobs = 128 + +// GetLastNHistoryDDLJobs returns the DDL history jobs and an error. // The maximum count of history jobs is num. -func GetHistoryDDLJobs(txn kv.Transaction, maxNumJobs int) ([]*model.Job, error) { - t := meta.NewMeta(txn) - jobs, err := t.GetLastNHistoryDDLJobs(maxNumJobs) +func GetLastNHistoryDDLJobs(t *meta.Meta, maxNumJobs int) ([]*model.Job, error) { + iterator, err := t.GetLastHistoryDDLJobsIterator() if err != nil { return nil, errors.Trace(err) } - return jobs, nil + return iterator.GetLastJobs(maxNumJobs, nil) } // IterHistoryDDLJobs iterates history DDL jobs until the `finishFn` return true or error. @@ -1162,7 +1186,26 @@ func IterAllDDLJobs(txn kv.Transaction, finishFn func([]*model.Job) (bool, error // GetAllHistoryDDLJobs get all the done DDL jobs. 
func GetAllHistoryDDLJobs(m *meta.Meta) ([]*model.Job, error) { - return m.GetAllHistoryDDLJobs() + iterator, err := m.GetLastHistoryDDLJobsIterator() + if err != nil { + return nil, errors.Trace(err) + } + allJobs := make([]*model.Job, 0, batchNumHistoryJobs) + for { + jobs, err := iterator.GetLastJobs(batchNumHistoryJobs, nil) + if err != nil { + return nil, errors.Trace(err) + } + allJobs = append(allJobs, jobs...) + if len(jobs) < batchNumHistoryJobs { + break + } + } + // sort job. + slices.SortFunc(allJobs, func(i, j *model.Job) bool { + return i.ID < j.ID + }) + return allJobs, nil } // GetHistoryJobByID return history DDL job by ID. diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 01f57c3ce1d18..9082944257c04 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -56,6 +56,7 @@ import ( "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/domainutil" + "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/mock" @@ -820,6 +821,24 @@ func setCharsetCollationFlenDecimal(tp *types.FieldType, colName, colCharset, co return checkTooBigFieldLengthAndTryAutoConvert(tp, colName, sessVars) } +func decodeEnumSetBinaryLiteralToUTF8(tp *types.FieldType, chs string) { + if tp.GetType() != mysql.TypeEnum && tp.GetType() != mysql.TypeSet { + return + } + enc := charset.FindEncoding(chs) + for i, elem := range tp.GetElems() { + if !tp.GetElemIsBinaryLit(i) { + continue + } + s, err := enc.Transform(nil, hack.Slice(elem), charset.OpDecodeReplace) + if err != nil { + logutil.BgLogger().Warn("decode enum binary literal to utf-8 failed", zap.Error(err)) + } + tp.SetElem(i, string(hack.String(s))) + } + tp.CleanElemIsBinaryLit() +} + // buildColumnAndConstraint builds table.Column and ast.Constraint from the parameters. // outPriKeyConstraint is the primary key constraint out of column definition. 
For example: // `create table t1 (id int , age int, primary key(id));` @@ -852,6 +871,7 @@ func buildColumnAndConstraint( if err := setCharsetCollationFlenDecimal(colDef.Tp, colDef.Name.Name.O, chs, coll, ctx.GetSessionVars()); err != nil { return nil, nil, errors.Trace(err) } + decodeEnumSetBinaryLiteralToUTF8(colDef.Tp, chs) col, cts, err := columnDefToCol(ctx, offset, colDef, outPriKeyConstraint) if err != nil { return nil, nil, errors.Trace(err) @@ -3048,15 +3068,27 @@ func checkMultiSpecs(sctx sessionctx.Context, specs []*ast.AlterTableSpec) error return dbterror.ErrRunMultiSchemaChanges } } else { - if len(specs) > 1 && !isSameTypeMultiSpecs(specs) { + if len(specs) > 1 && !isSameTypeMultiSpecs(specs) && !allSupported(specs) { return dbterror.ErrRunMultiSchemaChanges } } return nil } -func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) { - validSpecs, err := resolveAlterTableSpec(sctx, specs) +func allSupported(specs []*ast.AlterTableSpec) bool { + for _, s := range specs { + switch s.Tp { + case ast.AlterTableAddColumns, ast.AlterTableDropColumn, ast.AlterTableDropIndex, ast.AlterTableDropPrimaryKey: + default: + return false + } + } + return true +} + +func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast.AlterTableStmt) (err error) { + ident := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} + validSpecs, err := resolveAlterTableSpec(sctx, stmt.Specs) if err != nil { return errors.Trace(err) } @@ -3086,12 +3118,9 @@ func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, ident ast if len(validSpecs) > 1 { useMultiSchemaChange := false switch validSpecs[0].Tp { - case ast.AlterTableAddColumns: + case ast.AlterTableAddColumns, ast.AlterTableDropColumn, + ast.AlterTableDropPrimaryKey, ast.AlterTableDropIndex: useMultiSchemaChange = true - case ast.AlterTableDropColumn: - err = d.DropColumns(sctx, ident, validSpecs) - case ast.AlterTableDropPrimaryKey, ast.AlterTableDropIndex: - err = d.DropIndexes(sctx, ident, validSpecs) default: return dbterror.ErrRunMultiSchemaChanges } @@ -3131,9 +3160,9 @@ func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, ident ast case ast.AlterTableDropColumn: err = d.DropColumn(sctx, ident, spec) case ast.AlterTableDropIndex: - err = d.DropIndex(sctx, ident, model.NewCIStr(spec.Name), spec.IfExists) + err = d.dropIndex(sctx, ident, model.NewCIStr(spec.Name), spec.IfExists) case ast.AlterTableDropPrimaryKey: - err = d.DropIndex(sctx, ident, model.NewCIStr(mysql.PrimaryKeyName), spec.IfExists) + err = d.dropIndex(sctx, ident, model.NewCIStr(mysql.PrimaryKeyName), spec.IfExists) case ast.AlterTableRenameIndex: err = d.RenameIndex(sctx, ident, spec) case ast.AlterTableDropPartition: @@ -3164,10 +3193,10 @@ func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, ident ast constr := spec.Constraint switch spec.Constraint.Tp { case ast.ConstraintKey, ast.ConstraintIndex: - err = d.CreateIndex(sctx, ident, ast.IndexKeyTypeNone, model.NewCIStr(constr.Name), + err = d.createIndex(sctx, ident, ast.IndexKeyTypeNone, model.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, constr.IfNotExists) case ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: - err = d.CreateIndex(sctx, ident, ast.IndexKeyTypeUnique, model.NewCIStr(constr.Name), + err = d.createIndex(sctx, ident, ast.IndexKeyTypeUnique, model.NewCIStr(constr.Name), spec.Constraint.Keys, constr.Option, false) // IfNotExists 
should be not applied case ast.ConstraintForeignKey: // NOTE: we do not handle `symbol` and `index_name` well in the parser and we do not check ForeignKey already exists, @@ -3196,7 +3225,7 @@ func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, ident ast case ast.AlterTableRenameTable: newIdent := ast.Ident{Schema: spec.NewTable.Schema, Name: spec.NewTable.Name} isAlterTable := true - err = d.RenameTable(sctx, ident, newIdent, isAlterTable) + err = d.renameTable(sctx, ident, newIdent, isAlterTable) case ast.AlterTablePartition: // Prevent silent succeed if user executes ALTER TABLE x PARTITION BY ... err = errors.New("alter table partition is unsupported") @@ -3582,81 +3611,6 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } -// AddColumns will add multi new columns to the table. -func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - - // Check all the columns at once. - addingColumnNames := make(map[string]bool) - dupColumnNames := make(map[string]bool) - for _, spec := range specs { - for _, specNewColumn := range spec.NewColumns { - if !addingColumnNames[specNewColumn.Name.Name.L] { - addingColumnNames[specNewColumn.Name.Name.L] = true - continue - } - if !spec.IfNotExists { - return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) - } - dupColumnNames[specNewColumn.Name.Name.L] = true - } - } - columns := make([]*table.Column, 0, len(addingColumnNames)) - positions := make([]*ast.ColumnPosition, 0, len(addingColumnNames)) - offsets := make([]int, 0, len(addingColumnNames)) - ifNotExists := make([]bool, 0, len(addingColumnNames)) - newColumnsCount := 0 - // Check the columns one by one. - for _, spec := range specs { - for _, specNewColumn := range spec.NewColumns { - if spec.IfNotExists && dupColumnNames[specNewColumn.Name.Name.L] { - err = infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O) - ctx.GetSessionVars().StmtCtx.AppendNote(err) - continue - } - col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) - if err != nil { - return errors.Trace(err) - } - // Added column has existed and if_not_exists flag is true. - if col == nil && spec.IfNotExists { - continue - } - columns = append(columns, col) - positions = append(positions, spec.Position) - offsets = append(offsets, 0) - ifNotExists = append(ifNotExists, spec.IfNotExists) - newColumnsCount++ - } - } - if newColumnsCount == 0 { - return nil - } - if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { - return errors.Trace(err) - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: t.Meta().ID, - SchemaName: schema.Name.L, - TableName: t.Meta().Name.L, - Type: model.ActionAddColumns, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{columns, positions, offsets, ifNotExists}, - } - - err = d.DoDDLJob(ctx, job) - if err != nil { - return errors.Trace(err) - } - err = d.callHookOnChanged(job, err) - return errors.Trace(err) -} - // AddTablePartitions will add a new partition to the table. 
func (d *ddl) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { is := d.infoCache.GetLatest() @@ -4077,85 +4031,7 @@ func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTa if err != nil { return err } - var multiSchemaInfo *model.MultiSchemaInfo - if variable.EnableChangeMultiSchema.Load() { - multiSchemaInfo = &model.MultiSchemaInfo{} - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: t.Meta().ID, - SchemaName: schema.Name.L, - TableName: t.Meta().Name.L, - Type: model.ActionDropColumn, - BinlogInfo: &model.HistoryInfo{}, - MultiSchemaInfo: multiSchemaInfo, - SchemaState: model.StatePublic, - Args: []interface{}{colName}, - } - - err = d.DoDDLJob(ctx, job) - // column not exists, but if_exists flags is true, so we ignore this error. - if dbterror.ErrCantDropFieldOrKey.Equal(err) && spec.IfExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - err = d.callHookOnChanged(job, err) - return errors.Trace(err) -} - -// DropColumns will drop multi-columns from the table, now we don't support drop the column with index covered. -func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - tblInfo := t.Meta() - dropingColumnNames := make(map[string]bool) - dupColumnNames := make(map[string]bool) - for _, spec := range specs { - if !dropingColumnNames[spec.OldColumnName.Name.L] { - dropingColumnNames[spec.OldColumnName.Name.L] = true - } else { - if spec.IfExists { - dupColumnNames[spec.OldColumnName.Name.L] = true - continue - } - return errors.Trace(dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", spec.OldColumnName.Name.O)) - } - } - - ifExists := make([]bool, 0, len(specs)) - colNames := make([]model.CIStr, 0, len(specs)) - for _, spec := range specs { - if spec.IfExists && dupColumnNames[spec.OldColumnName.Name.L] { - err = dbterror.ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", spec.OldColumnName.Name.L) - ctx.GetSessionVars().StmtCtx.AppendNote(err) - continue - } - isDropable, err := checkIsDroppableColumn(ctx, t, spec) - if err != nil { - return err - } - // Column can't drop and if_exists flag is true. 
- if !isDropable && spec.IfExists { - continue - } - colNames = append(colNames, spec.OldColumnName.Name) - ifExists = append(ifExists, spec.IfExists) - } - if len(colNames) == 0 { - return nil - } - if len(tblInfo.Columns) == len(colNames) { - return dbterror.ErrCantRemoveAllFields.GenWithStack("can't drop all columns in table %s", - tblInfo.Name) - } - err = checkVisibleColumnCnt(t, 0, len(colNames)) - if err != nil { - return err - } var multiSchemaInfo *model.MultiSchemaInfo if variable.EnableChangeMultiSchema.Load() { multiSchemaInfo = &model.MultiSchemaInfo{} @@ -4165,17 +4041,15 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt SchemaID: schema.ID, TableID: t.Meta().ID, SchemaName: schema.Name.L, + SchemaState: model.StatePublic, TableName: t.Meta().Name.L, - Type: model.ActionDropColumns, + Type: model.ActionDropColumn, BinlogInfo: &model.HistoryInfo{}, MultiSchemaInfo: multiSchemaInfo, - Args: []interface{}{colNames, ifExists}, + Args: []interface{}{colName, spec.IfExists}, } err = d.DoDDLJob(ctx, job) - if err != nil { - return errors.Trace(err) - } err = d.callHookOnChanged(job, err) return errors.Trace(err) } @@ -4521,6 +4395,7 @@ func (d *ddl) getModifiableColumnJob(ctx context.Context, sctx sessionctx.Contex if err = setCharsetCollationFlenDecimal(&newCol.FieldType, newCol.Name.O, chs, coll, sctx.GetSessionVars()); err != nil { return nil, errors.Trace(err) } + decodeEnumSetBinaryLiteralToUTF8(&newCol.FieldType, chs) // Check the column with foreign key, waiting for the default flen and decimal. if fkInfo := getColumnForeignKeyInfo(originalColName.L, t.Meta().ForeignKeys); fkInfo != nil { @@ -4706,7 +4581,6 @@ func checkAutoRandom(tableInfo *model.TableInfo, originCol *table.Column, specNe } switch { case oldRandBits == newRandBits: - break case oldRandBits < newRandBits: addingAutoRandom := oldRandBits == 0 if addingAutoRandom { @@ -5326,71 +5200,173 @@ func (d *ddl) RenameIndex(ctx sessionctx.Context, ident ast.Ident, spec *ast.Alt return errors.Trace(err) } -// DropTable will proceed even if some table in the list does not exists. -func (d *ddl) DropTable(ctx sessionctx.Context, ti ast.Ident) (err error) { - schema, tb, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } +// If one drop those tables by mistake, it's difficult to recover. +// In the worst case, the whole TiDB cluster fails to bootstrap, so we prevent user from dropping them. +var systemTables = map[string]struct{}{ + "tidb": {}, + "gc_delete_range": {}, + "gc_delete_range_done": {}, +} - if tb.Meta().IsView() { - return infoschema.ErrTableNotExists.GenWithStackByArgs(ti.Schema, ti.Name) - } - if tb.Meta().IsSequence() { - return infoschema.ErrTableNotExists.GenWithStackByArgs(ti.Schema, ti.Name) +func isSystemTable(schema, table string) bool { + if schema != "mysql" { + return false } - if tb.Meta().TableCacheStatusType != model.TableCacheStatusDisable { - return dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Table") + if _, ok := systemTables[table]; ok { + return true } + return false +} - job := &model.Job{ - SchemaID: schema.ID, - TableID: tb.Meta().ID, - SchemaName: schema.Name.L, - SchemaState: schema.State, - TableName: tb.Meta().Name.L, - Type: model.ActionDropTable, - BinlogInfo: &model.HistoryInfo{}, - } +type objectType int + +const ( + tableObject objectType = iota + viewObject + sequenceObject +) + +// dropTableObject provides common logic to DROP TABLE/VIEW/SEQUENCE. 
+func (d *ddl) dropTableObject( + ctx sessionctx.Context, + objects []*ast.TableName, + ifExists bool, + tableObjectType objectType, +) error { + var ( + notExistTables []string + sessVars = ctx.GetSessionVars() + is = d.GetInfoSchemaWithInterceptor(ctx) + dropExistErr *terror.Error + jobType model.ActionType + ) + + switch tableObjectType { + case tableObject: + dropExistErr = infoschema.ErrTableDropExists + jobType = model.ActionDropTable + case viewObject: + dropExistErr = infoschema.ErrTableDropExists + jobType = model.ActionDropView + case sequenceObject: + dropExistErr = infoschema.ErrSequenceDropExists + jobType = model.ActionDropSequence + } + + for _, tn := range objects { + fullti := ast.Ident{Schema: tn.Schema, Name: tn.Name} + schema, ok := is.SchemaByName(tn.Schema) + if !ok { + // TODO: we should return special error for table not exist, checking "not exist" is not enough, + // because some other errors may contain this error string too. + notExistTables = append(notExistTables, fullti.String()) + continue + } + tableInfo, err := is.TableByName(tn.Schema, tn.Name) + if err != nil && infoschema.ErrTableNotExists.Equal(err) { + notExistTables = append(notExistTables, fullti.String()) + continue + } else if err != nil { + return err + } + + // prechecks before build DDL job + + // Protect important system table from been dropped by a mistake. + // I can hardly find a case that a user really need to do this. + if isSystemTable(tn.Schema.L, tn.Name.L) { + return errors.Errorf("Drop tidb system table '%s.%s' is forbidden", tn.Schema.L, tn.Name.L) + } + switch tableObjectType { + case tableObject: + if !tableInfo.Meta().IsBaseTable() { + notExistTables = append(notExistTables, fullti.String()) + continue + } + + tempTableType := tableInfo.Meta().TempTableType + if config.CheckTableBeforeDrop && tempTableType == model.TempTableNone { + logutil.BgLogger().Warn("admin check table before drop", + zap.String("database", fullti.Schema.O), + zap.String("table", fullti.Name.O), + ) + exec := ctx.(sqlexec.RestrictedSQLExecutor) + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, _, err := exec.ExecRestrictedSQL(internalCtx, nil, "admin check table %n.%n", fullti.Schema.O, fullti.Name.O) + if err != nil { + return err + } + } + + if tableInfo.Meta().TableCacheStatusType != model.TableCacheStatusDisable { + return dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Table") + } + case viewObject: + if !tableInfo.Meta().IsView() { + return dbterror.ErrWrongObject.GenWithStackByArgs(fullti.Schema, fullti.Name, "VIEW") + } + case sequenceObject: + if !tableInfo.Meta().IsSequence() { + err = dbterror.ErrWrongObject.GenWithStackByArgs(fullti.Schema, fullti.Name, "SEQUENCE") + if ifExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + continue + } + return err + } + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: tableInfo.Meta().ID, + SchemaName: schema.Name.L, + SchemaState: schema.State, + TableName: tableInfo.Meta().Name.L, + Type: jobType, + BinlogInfo: &model.HistoryInfo{}, + } + + err = d.DoDDLJob(ctx, job) + err = d.callHookOnChanged(job, err) + if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) { + notExistTables = append(notExistTables, fullti.String()) + continue + } else if err != nil { + return errors.Trace(err) + } + + // unlock table after drop + if tableObjectType != tableObject { + continue + } + if !config.TableLockEnabled() { + continue + } + if ok, _ := 
ctx.CheckTableLocked(tableInfo.Meta().ID); ok { + ctx.ReleaseTableLockByTableIDs([]int64{tableInfo.Meta().ID}) + } - err = d.DoDDLJob(ctx, job) - err = d.callHookOnChanged(job, err) - if err != nil { - return errors.Trace(err) } - if !config.TableLockEnabled() { - return nil + if len(notExistTables) > 0 && !ifExists { + return dropExistErr.GenWithStackByArgs(strings.Join(notExistTables, ",")) } - if ok, _ := ctx.CheckTableLocked(tb.Meta().ID); ok { - ctx.ReleaseTableLockByTableIDs([]int64{tb.Meta().ID}) + // We need add warning when use if exists. + if len(notExistTables) > 0 && ifExists { + for _, table := range notExistTables { + sessVars.StmtCtx.AppendNote(dropExistErr.GenWithStackByArgs(table)) + } } return nil } -// DropView will proceed even if some view in the list does not exists. -func (d *ddl) DropView(ctx sessionctx.Context, ti ast.Ident) (err error) { - schema, tb, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - - if !tb.Meta().IsView() { - return dbterror.ErrWrongObject.GenWithStackByArgs(ti.Schema, ti.Name, "VIEW") - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: tb.Meta().ID, - SchemaName: schema.Name.L, - SchemaState: tb.Meta().State, - TableName: tb.Meta().Name.L, - Type: model.ActionDropView, - BinlogInfo: &model.HistoryInfo{}, - } +// DropTable will proceed even if some table in the list does not exists. +func (d *ddl) DropTable(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error) { + return d.dropTableObject(ctx, stmt.Tables, stmt.IfExists, tableObject) +} - err = d.DoDDLJob(ctx, job) - err = d.callHookOnChanged(job, err) - return errors.Trace(err) +// DropView will proceed even if some view in the list does not exists. +func (d *ddl) DropView(ctx sessionctx.Context, stmt *ast.DropTableStmt) (err error) { + return d.dropTableObject(ctx, stmt.Tables, stmt.IfExists, viewObject) } func (d *ddl) TruncateTable(ctx sessionctx.Context, ti ast.Ident) error { @@ -5447,7 +5423,28 @@ func (d *ddl) TruncateTable(ctx sessionctx.Context, ti ast.Ident) error { return nil } -func (d *ddl) RenameTable(ctx sessionctx.Context, oldIdent, newIdent ast.Ident, isAlterTable bool) error { +func (d *ddl) RenameTable(ctx sessionctx.Context, s *ast.RenameTableStmt) error { + isAlterTable := false + var err error + if len(s.TableToTables) == 1 { + oldIdent := ast.Ident{Schema: s.TableToTables[0].OldTable.Schema, Name: s.TableToTables[0].OldTable.Name} + newIdent := ast.Ident{Schema: s.TableToTables[0].NewTable.Schema, Name: s.TableToTables[0].NewTable.Name} + err = d.renameTable(ctx, oldIdent, newIdent, isAlterTable) + } else { + oldIdents := make([]ast.Ident, 0, len(s.TableToTables)) + newIdents := make([]ast.Ident, 0, len(s.TableToTables)) + for _, tables := range s.TableToTables { + oldIdent := ast.Ident{Schema: tables.OldTable.Schema, Name: tables.OldTable.Name} + newIdent := ast.Ident{Schema: tables.NewTable.Schema, Name: tables.NewTable.Name} + oldIdents = append(oldIdents, oldIdent) + newIdents = append(newIdents, newIdent) + } + err = d.renameTables(ctx, oldIdents, newIdents, isAlterTable) + } + return err +} + +func (d *ddl) renameTable(ctx sessionctx.Context, oldIdent, newIdent ast.Ident, isAlterTable bool) error { is := d.GetInfoSchemaWithInterceptor(ctx) tables := make(map[string]int64) schemas, tableID, err := extractTblInfos(is, oldIdent, newIdent, isAlterTable, tables) @@ -5480,7 +5477,7 @@ func (d *ddl) RenameTable(ctx sessionctx.Context, oldIdent, newIdent ast.Ident, return errors.Trace(err) } -func (d *ddl) 
RenameTables(ctx sessionctx.Context, oldIdents, newIdents []ast.Ident, isAlterTable bool) error { +func (d *ddl) renameTables(ctx sessionctx.Context, oldIdents, newIdents []ast.Ident, isAlterTable bool) error { is := d.GetInfoSchemaWithInterceptor(ctx) oldTableNames := make([]*model.CIStr, 0, len(oldIdents)) tableNames := make([]*model.CIStr, 0, len(oldIdents)) @@ -5805,7 +5802,13 @@ func buildHiddenColumnInfo(ctx sessionctx.Context, indexPartSpecifications []*as return hiddenCols, nil } -func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, +func (d *ddl) CreateIndex(ctx sessionctx.Context, stmt *ast.CreateIndexStmt) error { + ident := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} + return d.createIndex(ctx, ident, stmt.KeyType, model.NewCIStr(stmt.IndexName), + stmt.IndexPartSpecifications, stmt.IndexOption, stmt.IfNotExists) +} + +func (d *ddl) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.IndexKeyType, indexName model.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption, ifNotExists bool) error { // not support Spatial and FullText index if keyType == ast.IndexKeyTypeFullText || keyType == ast.IndexKeyTypeSpatial { @@ -5960,10 +5963,12 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * // Check wrong reference options of foreign key on stored generated columns switch refer.OnUpdate.ReferOpt { case ast.ReferOptionCascade, ast.ReferOptionSetNull, ast.ReferOptionSetDefault: + //nolint: gosec return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON UPDATE " + refer.OnUpdate.ReferOpt.String()) } switch refer.OnDelete.ReferOpt { case ast.ReferOptionSetNull, ast.ReferOptionSetDefault: + //nolint: gosec return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON DELETE " + refer.OnDelete.ReferOpt.String()) } continue @@ -6067,7 +6072,16 @@ func (d *ddl) DropForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName model. return errors.Trace(err) } -func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, ifExists bool) error { +func (d *ddl) DropIndex(ctx sessionctx.Context, stmt *ast.DropIndexStmt) error { + ti := ast.Ident{Schema: stmt.Table.Schema, Name: stmt.Table.Name} + err := d.dropIndex(ctx, ti, model.NewCIStr(stmt.IndexName), stmt.IfExists) + if (infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err)) && stmt.IfExists { + err = nil + } + return err +} + +func (d *ddl) dropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, ifExists bool) error { is := d.infoCache.GetLatest() schema, ok := is.SchemaByName(ti.Schema) if !ok { @@ -6112,65 +6126,11 @@ func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CI SchemaID: schema.ID, TableID: t.Meta().ID, SchemaName: schema.Name.L, + SchemaState: indexInfo.State, TableName: t.Meta().Name.L, Type: jobTp, BinlogInfo: &model.HistoryInfo{}, - SchemaState: indexInfo.State, - Args: []interface{}{indexName}, - } - - err = d.DoDDLJob(ctx, job) - // index not exists, but if_exists flags is true, so we ignore this error. 
- if dbterror.ErrCantDropFieldOrKey.Equal(err) && ifExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - err = d.callHookOnChanged(job, err) - return errors.Trace(err) -} - -func (d *ddl) DropIndexes(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return err - } - - if t.Meta().TableCacheStatusType != model.TableCacheStatusDisable { - return errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Indexes")) - } - indexNames := make([]model.CIStr, 0, len(specs)) - ifExists := make([]bool, 0, len(specs)) - for _, spec := range specs { - var indexName model.CIStr - if spec.Tp == ast.AlterTableDropPrimaryKey { - indexName = model.NewCIStr(mysql.PrimaryKeyName) - } else { - indexName = model.NewCIStr(spec.Name) - } - - indexInfo := t.Meta().FindIndexByName(indexName.L) - if indexInfo != nil { - _, err := checkIsDropPrimaryKey(indexName, indexInfo, t) - if err != nil { - return err - } - if err := checkDropIndexOnAutoIncrementColumn(t.Meta(), indexInfo); err != nil { - return errors.Trace(err) - } - } - - indexNames = append(indexNames, indexName) - ifExists = append(ifExists, spec.IfExists) - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: t.Meta().ID, - SchemaName: schema.Name.L, - TableName: t.Meta().Name.L, - Type: model.ActionDropIndexes, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{indexNames, ifExists}, + Args: []interface{}{indexName, ifExists}, } err = d.DoDDLJob(ctx, job) @@ -6240,7 +6200,6 @@ func validateCommentLength(vars *variable.SessionVars, name string, comment *str case dbterror.ErrTooLongTableComment: maxLen *= 2 case dbterror.ErrTooLongFieldComment, dbterror.ErrTooLongIndexComment, dbterror.ErrTooLongTablePartitionComment: - break default: // add more types of terror.Error if need } @@ -6304,7 +6263,6 @@ func checkColumnsTypeAndValuesMatch(ctx sessionctx.Context, meta *model.TableInf case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeDuration: switch vkind { case types.KindString, types.KindBytes: - break default: return dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs() } @@ -6676,34 +6634,8 @@ func (d *ddl) AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) return errors.Trace(err) } -func (d *ddl) DropSequence(ctx sessionctx.Context, ti ast.Ident, ifExists bool) (err error) { - schema, tbl, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - - if !tbl.Meta().IsSequence() { - err = dbterror.ErrWrongObject.GenWithStackByArgs(ti.Schema, ti.Name, "SEQUENCE") - if ifExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - return err - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: tbl.Meta().ID, - SchemaName: schema.Name.L, - SchemaState: tbl.Meta().State, - TableName: tbl.Meta().Name.L, - Type: model.ActionDropSequence, - BinlogInfo: &model.HistoryInfo{}, - } - - err = d.DoDDLJob(ctx, job) - err = d.callHookOnChanged(job, err) - return errors.Trace(err) +func (d *ddl) DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error) { + return d.dropTableObject(ctx, stmt.Sequences, stmt.IfExists, sequenceObject) } func (d *ddl) AlterIndexVisibility(ctx sessionctx.Context, ident ast.Ident, indexName model.CIStr, visibility ast.IndexVisibility) error { @@ -7094,8 +7026,8 @@ func (d *ddl) AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacem return errors.Trace(err) } -func (d *ddl) AlterTableCache(ctx 
sessionctx.Context, ti ast.Ident) (err error) { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) +func (d *ddl) AlterTableCache(sctx sessionctx.Context, ti ast.Ident) (err error) { + schema, t, err := d.getSchemaAndTableByIdent(sctx, ti) if err != nil { return err } @@ -7123,17 +7055,18 @@ func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) return dbterror.ErrOptOnCacheTable.GenWithStackByArgs("table too large") } - ddlQuery, _ := ctx.Value(sessionctx.QueryString).(string) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + ddlQuery, _ := sctx.Value(sessionctx.QueryString).(string) // Initialize the cached table meta lock info in `mysql.table_cache_meta`. // The operation shouldn't fail in most cases, and if it does, return the error directly. // This DML and the following DDL is not atomic, that's not a problem. - _, _, err = ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.Background(), nil, + _, _, err = sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, "replace into mysql.table_cache_meta values (%?, 'NONE', 0, 0)", t.Meta().ID) if err != nil { return errors.Trace(err) } - ctx.SetValue(sessionctx.QueryString, ddlQuery) + sctx.SetValue(sessionctx.QueryString, ddlQuery) job := &model.Job{ SchemaID: schema.ID, @@ -7145,14 +7078,16 @@ func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) Args: []interface{}{}, } - err = d.DoDDLJob(ctx, job) + err = d.DoDDLJob(sctx, job) return d.callHookOnChanged(job, err) } func checkCacheTableSize(store kv.Storage, tableID int64) (bool, error) { const cacheTableSizeLimit = 64 * (1 << 20) // 64M succ := true - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { + txn.SetOption(kv.RequestSourceType, kv.InternalTxnCacheTable) prefix := tablecodec.GenTablePrefix(tableID) it, err := txn.Iter(prefix, prefix.PrefixNext()) if err != nil { diff --git a/ddl/ddl_api_test.go b/ddl/ddl_api_test.go index e915dd33d7678..18db2dfa62170 100644 --- a/ddl/ddl_api_test.go +++ b/ddl/ddl_api_test.go @@ -31,8 +31,6 @@ func TestIsJobRollbackable(t *testing.T) { {model.ActionDropIndex, model.StateDeleteOnly, false}, {model.ActionDropSchema, model.StateDeleteOnly, false}, {model.ActionDropColumn, model.StateDeleteOnly, false}, - {model.ActionDropColumns, model.StateDeleteOnly, false}, - {model.ActionDropIndexes, model.StateDeleteOnly, false}, } job := &model.Job{} for _, ca := range cases { diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 14ec2a6ec4c56..83f83c6dabae6 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -61,6 +61,9 @@ func (d *ddl) generalWorker() *worker { return d.workers[generalWorker] } +// JobNeedGCForTest is only used for test. +var JobNeedGCForTest = jobNeedGC + // GetMaxRowID is used for test. func GetMaxRowID(store kv.Storage, priority int, t table.Table, startHandle, endHandle kv.Key) (kv.Key, error) { return getRangeEndKey(NewJobContext(), store, priority, t, startHandle, endHandle) @@ -259,6 +262,7 @@ func TestBuildJobDependence(t *testing.T) { defer func() { require.NoError(t, store.Close()) }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) // Add some non-add-index jobs. 
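Aside: the AlterTableCache and checkCacheTableSize hunks above tag their background work twice, once on the context via kv.WithInternalSourceType and once on the transaction via the kv.RequestSourceType option, so the traffic can be attributed in diagnostics. Below is a compact sketch of that pattern, assuming the pingcap/tidb kv package exactly as used in the diff; the helper name is invented.

package ddlutil

import (
	"context"

	"github.com/pingcap/tidb/kv"
)

// runTaggedInternalTxn labels the context with the internal source and repeats
// the label as the transaction's request-source option, matching how
// AlterTableCache / checkCacheTableSize are changed above.
func runTaggedInternalTxn(store kv.Storage, source string,
	fn func(ctx context.Context, txn kv.Transaction) error) error {
	ctx := kv.WithInternalSourceType(context.Background(), source)
	return kv.RunInNewTxn(ctx, store, true /* retryable */, func(ctx context.Context, txn kv.Transaction) error {
		txn.SetOption(kv.RequestSourceType, source)
		return fn(ctx, txn)
	})
}

A cached-table caller would pass kv.InternalTxnCacheTable, the DDL bookkeeping paths kv.InternalTxnDDL; further down, the ddl_worker.go hunk derives a per-job label through the DDLBackfillers map (kv.InternalTxnBackfillDDLPrefix plus the backfiller name), so index backfill traffic can be told apart from ordinary DDL metadata writes.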
job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} @@ -267,7 +271,7 @@ func TestBuildJobDependence(t *testing.T) { job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn} job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema} job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}} - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, m.EnQueueDDLJob(job1)) require.NoError(t, m.EnQueueDDLJob(job2)) @@ -280,7 +284,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job4) require.NoError(t, err) @@ -289,7 +293,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job5) require.NoError(t, err) @@ -298,7 +302,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job8) require.NoError(t, err) @@ -307,7 +311,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job10) require.NoError(t, err) @@ -316,7 +320,7 @@ func TestBuildJobDependence(t *testing.T) { }) require.NoError(t, err) job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := buildJobDependence(m, job12) require.NoError(t, err) @@ -477,8 +481,9 @@ func isDDLJobDone(test *testing.T, t *meta.Meta) bool { func testCheckSchemaState(test *testing.T, d *ddl, dbInfo *model.DBInfo, state model.SchemaState) { isDropped := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for { - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { t := 
meta.NewMeta(txn) info, err := t.GetDatabase(dbInfo.ID) require.NoError(test, err) @@ -553,24 +558,24 @@ func TestReorg(t *testing.T) { time.Sleep(testLease) - ctx := testNewContext(d) + sctx := testNewContext(d) - ctx.SetValue(testCtxKey, 1) - require.Equal(t, ctx.Value(testCtxKey), 1) - ctx.ClearValue(testCtxKey) + sctx.SetValue(testCtxKey, 1) + require.Equal(t, sctx.Value(testCtxKey), 1) + sctx.ClearValue(testCtxKey) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err := ctx.Txn(true) + txn, err := sctx.Txn(true) require.NoError(t, err) err = txn.Set([]byte("a"), []byte("b")) require.NoError(t, err) err = txn.Rollback() require.NoError(t, err) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) err = txn.Set([]byte("a"), []byte("b")) require.NoError(t, err) @@ -583,9 +588,9 @@ func TestReorg(t *testing.T) { ID: 1, SnapshotVer: 1, // Make sure it is not zero. So the reorgInfo's first is false. } - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) m := meta.NewMeta(txn) e := &meta.Element{ID: 333, TypeKey: meta.IndexElementKey} @@ -614,7 +619,7 @@ func TestReorg(t *testing.T) { // Test whether reorgInfo's Handle is update. err = txn.Commit(context.Background()) require.NoError(t, err) - err = sessiontxn.NewTxn(context.Background(), ctx) + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) m = meta.NewMeta(txn) @@ -644,7 +649,8 @@ func TestReorg(t *testing.T) { EndKey: test.endKey.Encoded(), PhysicalTableID: 456, } - err = kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err1 error _, err1 = getReorgInfo(NewJobContext(), d.ddlCtx, newReorgHandler(m), job, mockTbl, []*meta.Element{element}) @@ -656,7 +662,7 @@ func TestReorg(t *testing.T) { job.SnapshotVer = uint64(1) err = info.UpdateReorgMeta(info.StartKey) require.NoError(t, err) - err = kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) info1, err1 := getReorgInfo(NewJobContext(), d.ddlCtx, newReorgHandler(m), job, mockTbl, []*meta.Element{element}) require.NoError(t, err1) @@ -675,7 +681,7 @@ func TestReorg(t *testing.T) { return nil }) require.Error(t, err) - txn, err = ctx.Txn(true) + txn, err = sctx.Txn(true) require.NoError(t, err) err = txn.Commit(context.Background()) require.NoError(t, err) @@ -683,55 +689,6 @@ func TestReorg(t *testing.T) { } } -func TestGetDDLInfo(t *testing.T) { - store, clean := newMockStore(t) - defer clean() - - txn, err := store.Begin() - require.NoError(t, err) - m := meta.NewMeta(txn) - - dbInfo2 := &model.DBInfo{ - ID: 2, - Name: model.NewCIStr("b"), - State: model.StateNone, - } - job := &model.Job{ - SchemaID: dbInfo2.ID, - Type: model.ActionCreateSchema, - RowCount: 0, - } - job1 := &model.Job{ - SchemaID: dbInfo2.ID, - Type: model.ActionAddIndex, - 
RowCount: 0, - } - - err = m.EnQueueDDLJob(job) - require.NoError(t, err) - - info, err := GetDDLInfo(txn) - require.NoError(t, err) - require.Len(t, info.Jobs, 1) - require.Equal(t, job, info.Jobs[0]) - require.Nil(t, info.ReorgHandle) - - // two jobs - m = meta.NewMeta(txn, meta.AddIndexJobListKey) - err = m.EnQueueDDLJob(job1) - require.NoError(t, err) - - info, err = GetDDLInfo(txn) - require.NoError(t, err) - require.Len(t, info.Jobs, 2) - require.Equal(t, job, info.Jobs[0]) - require.Equal(t, job1, info.Jobs[1]) - require.Nil(t, info.ReorgHandle) - - err = txn.Rollback() - require.NoError(t, err) -} - func TestGetDDLJobs(t *testing.T) { store, clean := newMockStore(t) defer clean() @@ -946,7 +903,7 @@ func TestGetHistoryDDLJobs(t *testing.T) { err = AddHistoryDDLJob(m, jobs[i], true) require.NoError(t, err) - historyJobs, err := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + historyJobs, err := GetLastNHistoryDDLJobs(m, DefNumHistoryJobs) require.NoError(t, err) if i+1 > MaxHistoryJobs { @@ -957,7 +914,7 @@ func TestGetHistoryDDLJobs(t *testing.T) { } delta := cnt - MaxHistoryJobs - historyJobs, err := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + historyJobs, err := GetLastNHistoryDDLJobs(m, DefNumHistoryJobs) require.NoError(t, err) require.Len(t, historyJobs, MaxHistoryJobs) diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 50362923488a8..2e1d50435189f 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -105,6 +105,7 @@ type JobContext struct { cacheSQL string cacheNormalizedSQL string cacheDigest *parser.Digest + tp string } // NewJobContext returns a new ddl job context. @@ -114,6 +115,7 @@ func NewJobContext() *JobContext { cacheSQL: "", cacheNormalizedSQL: "", cacheDigest: nil, + tp: "unknown", } } @@ -284,7 +286,8 @@ func (d *ddl) limitDDLJobs() { // addBatchDDLJobs gets global job IDs and puts the DDL jobs in the DDL queue. func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { startTime := time.Now() - err := kv.RunInNewTxn(context.Background(), d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) ids, err := t.GenGlobalIDs(len(tasks)) if err != nil { @@ -434,6 +437,11 @@ func (w *worker) deleteRange(ctx context.Context, job *model.Job) error { func jobNeedGC(job *model.Job) bool { if !job.IsCancelled() { + if job.Warning != nil && dbterror.ErrCantDropFieldOrKey.Equal(job.Warning) { + // For the field/key not exists warnings, there is no need to + // delete the ranges. + return false + } switch job.Type { case model.ActionAddIndex, model.ActionAddPrimaryKey: if job.State != model.JobStateRollbackDone { @@ -442,8 +450,17 @@ func jobNeedGC(job *model.Job) bool { // After rolling back an AddIndex operation, we need to use delete-range to delete the half-done index data. 
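Aside: jobNeedGC now short-circuits when the job only carries a "Can't DROP ..." warning, because such a job (a DROP ... IF EXISTS whose target never existed) wrote nothing for delete-range to reclaim. A test-style sketch of that behaviour, not part of the patch, reusing the JobNeedGCForTest alias exported for tests earlier in this diff; the assumption is that assigning the bare dbterror.ErrCantDropFieldOrKey value to job.Warning satisfies the Equal() check.

package ddl_test

import (
	"testing"

	"github.com/pingcap/tidb/ddl"
	"github.com/pingcap/tidb/parser/model"
	"github.com/pingcap/tidb/util/dbterror"
	"github.com/stretchr/testify/require"
)

// Sketch only; it would live next to the existing ddl_test package tests.
func TestJobNeedGCSkipsWarnedDrops(t *testing.T) {
	warned := &model.Job{Type: model.ActionDropIndex, State: model.JobStateDone}
	warned.Warning = dbterror.ErrCantDropFieldOrKey // assumption: bare error value is enough for Equal()
	require.False(t, ddl.JobNeedGCForTest(warned)) // nothing was written, nothing to reclaim

	rolledBack := &model.Job{Type: model.ActionAddIndex, State: model.JobStateRollbackDone}
	require.True(t, ddl.JobNeedGCForTest(rolledBack)) // half-written index data must be removed
}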
return true case model.ActionDropSchema, model.ActionDropTable, model.ActionTruncateTable, model.ActionDropIndex, model.ActionDropPrimaryKey, - model.ActionDropTablePartition, model.ActionTruncateTablePartition, model.ActionDropColumn, model.ActionDropColumns, model.ActionModifyColumn, model.ActionDropIndexes: + model.ActionDropTablePartition, model.ActionTruncateTablePartition, model.ActionDropColumn, model.ActionModifyColumn: return true + case model.ActionMultiSchemaChange: + for _, sub := range job.MultiSchemaInfo.SubJobs { + proxyJob := sub.ToProxyJob(job) + needGC := jobNeedGC(&proxyJob) + if needGC { + return true + } + } + return false } } return false @@ -460,7 +477,7 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { if jobNeedGC(job) { err = w.deleteRange(w.ctx, job) if err != nil { - return err + return errors.Trace(err) } } @@ -570,6 +587,25 @@ func (w *worker) unlockSeqNum(err error) { } } +// DDLBackfillers contains the DDL need backfill step. +var DDLBackfillers = map[model.ActionType]string{ + model.ActionAddIndex: "add_index", + model.ActionModifyColumn: "modify_column", + model.ActionDropIndex: "drop_index", +} + +func getDDLRequestSource(job *model.Job) string { + if tp, ok := DDLBackfillers[job.Type]; ok { + return kv.InternalTxnBackfillDDLPrefix + tp + } + return kv.InternalTxnDDL +} + +func (w *JobContext) setDDLLabelForDiagnosis(job *model.Job) { + w.tp = getDDLRequestSource(job) + w.ddlJobCtx = kv.WithInternalSourceType(w.ddlJobCtx, w.ddlJobSourceType()) +} + func (w *JobContext) getResourceGroupTaggerForTopSQL() tikvrpc.ResourceGroupTagger { if !topsqlstate.TopSQLEnabled() || w.cacheDigest == nil { return nil @@ -583,6 +619,10 @@ func (w *JobContext) getResourceGroupTaggerForTopSQL() tikvrpc.ResourceGroupTagg return tagger } +func (w *JobContext) ddlJobSourceType() string { + return w.tp +} + // handleDDLJobQueue handles DDL jobs in DDL Job queue. func (w *worker) handleDDLJobQueue(d *ddlCtx) error { once := true @@ -598,7 +638,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { runJobErr error ) waitTime := 2 * d.lease - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { // We are not owner, return and retry checking later. if !d.isOwner() { return nil @@ -618,6 +659,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { } w.setDDLLabelForTopSQL(job) + w.setDDLSourceForDiagnosis(job) + jobContext := w.jobContext(job) if tagger := w.getResourceGroupTaggerForTopSQL(job); tagger != nil { txn.SetOption(kv.ResourceGroupTagger, tagger) } @@ -643,6 +686,8 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx) error { d.mu.hook.OnJobRunBefore(job) d.mu.RUnlock() + // set request source type to DDL type + txn.SetOption(kv.RequestSourceType, jobContext.ddlJobSourceType()) // If running job meets error, we will save this error in job Error // and retry later if the job is not cancelled. schemaVer, runJobErr = w.runDDLJob(d, t, job) @@ -733,7 +778,7 @@ func writeBinlog(binlogCli *pumpcli.PumpsClient, txn kv.Transaction, job *model. // When this column is in the "delete only" and "delete reorg" states, the binlog of "drop column" has not been written yet, // but the column has been removed from the binlog of the write operation. 
// So we add this binlog to enable downstream components to handle DML correctly in this schema state. - ((job.Type == model.ActionDropColumn || job.Type == model.ActionDropColumns) && job.SchemaState == model.StateDeleteOnly) { + ((job.Type == model.ActionDropColumn) && job.SchemaState == model.StateDeleteOnly) { if skipWriteBinlog(job) { return } @@ -876,12 +921,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = w.onExchangeTablePartition(d, t, job) case model.ActionAddColumn: ver, err = onAddColumn(d, t, job) - case model.ActionAddColumns: - ver, err = onAddColumns(d, t, job) case model.ActionDropColumn: ver, err = onDropColumn(d, t, job) - case model.ActionDropColumns: - ver, err = onDropColumns(d, t, job) case model.ActionModifyColumn: ver, err = w.onModifyColumn(d, t, job) case model.ActionSetDefaultValue: @@ -892,8 +933,6 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = w.onCreateIndex(d, t, job, true) case model.ActionDropIndex, model.ActionDropPrimaryKey: ver, err = onDropIndex(d, t, job) - case model.ActionDropIndexes: - ver, err = onDropIndexes(d, t, job) case model.ActionRenameIndex: ver, err = onRenameIndex(d, t, job) case model.ActionAddForeignKey: diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index 082fa3fe6aa7d..6c21be950aead 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// + package ddl_test import ( @@ -113,7 +113,8 @@ func TestParallelDDL(t *testing.T) { qLen2 := int64(0) var err error for { - checkErr = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + checkErr = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) qLen1, err = m.DDLJobQueueLen() if err != nil { @@ -265,3 +266,39 @@ func TestParallelDDL(t *testing.T) { require.Less(t, seqIDs[5], seqIDs[8]) require.Less(t, seqIDs[8], seqIDs[10]) } + +func TestJobNeedGC(t *testing.T) { + job := &model.Job{Type: model.ActionAddIndex, State: model.JobStateCancelled} + require.False(t, ddl.JobNeedGCForTest(job)) + + job = &model.Job{Type: model.ActionAddIndex, State: model.JobStateDone} + require.False(t, ddl.JobNeedGCForTest(job)) + job = &model.Job{Type: model.ActionAddPrimaryKey, State: model.JobStateDone} + require.False(t, ddl.JobNeedGCForTest(job)) + job = &model.Job{Type: model.ActionAddIndex, State: model.JobStateRollbackDone} + require.True(t, ddl.JobNeedGCForTest(job)) + job = &model.Job{Type: model.ActionAddPrimaryKey, State: model.JobStateRollbackDone} + require.True(t, ddl.JobNeedGCForTest(job)) + + job = &model.Job{Type: model.ActionMultiSchemaChange, State: model.JobStateDone, MultiSchemaInfo: &model.MultiSchemaInfo{ + SubJobs: []*model.SubJob{ + {Type: model.ActionAddIndex, State: model.JobStateDone}, + {Type: model.ActionAddColumn, State: model.JobStateDone}, + {Type: model.ActionRebaseAutoID, State: model.JobStateDone}, + }}} + require.False(t, ddl.JobNeedGCForTest(job)) + job = &model.Job{Type: model.ActionMultiSchemaChange, State: model.JobStateDone, MultiSchemaInfo: &model.MultiSchemaInfo{ + SubJobs: []*model.SubJob{ + {Type: model.ActionAddIndex, State: model.JobStateDone}, + {Type: 
model.ActionDropColumn, State: model.JobStateDone}, + {Type: model.ActionRebaseAutoID, State: model.JobStateDone}, + }}} + require.True(t, ddl.JobNeedGCForTest(job)) + job = &model.Job{Type: model.ActionMultiSchemaChange, State: model.JobStateRollbackDone, MultiSchemaInfo: &model.MultiSchemaInfo{ + SubJobs: []*model.SubJob{ + {Type: model.ActionAddIndex, State: model.JobStateRollbackDone}, + {Type: model.ActionAddColumn, State: model.JobStateRollbackDone}, + {Type: model.ActionRebaseAutoID, State: model.JobStateCancelled}, + }}} + require.True(t, ddl.JobNeedGCForTest(job)) +} diff --git a/ddl/delete_range.go b/ddl/delete_range.go index d5fcfb1901341..644ef71eaf874 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -94,7 +94,11 @@ func (dr *delRange) addDelRangeJob(ctx context.Context, job *model.Job) error { } defer dr.sessPool.put(sctx) - err = insertJobIntoDeleteRangeTable(ctx, sctx, job) + if job.MultiSchemaInfo != nil { + err = insertJobIntoDeleteRangeTableMultiSchema(ctx, sctx, job) + } else { + err = insertJobIntoDeleteRangeTable(ctx, sctx, job, &elementIDAlloc{}) + } if err != nil { logutil.BgLogger().Error("[ddl] add job into delete-range table failed", zap.Int64("jobID", job.ID), zap.String("jobType", job.Type.String()), zap.Error(err)) return errors.Trace(err) @@ -106,6 +110,20 @@ func (dr *delRange) addDelRangeJob(ctx context.Context, job *model.Job) error { return nil } +func insertJobIntoDeleteRangeTableMultiSchema(ctx context.Context, sctx sessionctx.Context, job *model.Job) error { + var ea elementIDAlloc + for _, sub := range job.MultiSchemaInfo.SubJobs { + proxyJob := sub.ToProxyJob(job) + if jobNeedGC(&proxyJob) { + err := insertJobIntoDeleteRangeTable(ctx, sctx, &proxyJob, &ea) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + // removeFromGCDeleteRange implements delRangeManager interface. 
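Aside: insertJobIntoDeleteRangeTableMultiSchema above relies on the same proxy-job trick as the jobNeedGC change: each sub-job of an ActionMultiSchemaChange job is temporarily wrapped into a standalone model.Job so the existing single-job logic runs unchanged. A small sketch of that fan-out; the helper name is invented, the ToProxyJob shape follows the hunks above.

package ddlutil

import "github.com/pingcap/tidb/parser/model"

// anySubJobNeeds reports whether any sub-job of a multi-schema job satisfies
// pred, falling back to the job itself for ordinary jobs.
func anySubJobNeeds(job *model.Job, pred func(*model.Job) bool) bool {
	if job.MultiSchemaInfo == nil {
		return pred(job)
	}
	for _, sub := range job.MultiSchemaInfo.SubJobs {
		proxyJob := sub.ToProxyJob(job) // throwaway copy, so single-job logic runs unchanged
		if pred(&proxyJob) {
			return true
		}
	}
	return false
}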
func (dr *delRange) removeFromGCDeleteRange(ctx context.Context, jobID int64, tableIDs []int64) error { sctx, err := dr.sessPool.get() @@ -152,21 +170,22 @@ func (dr *delRange) startEmulator() { } func (dr *delRange) doDelRangeWork() error { - ctx, err := dr.sessPool.get() + sctx, err := dr.sessPool.get() if err != nil { logutil.BgLogger().Error("[ddl] delRange emulator get session failed", zap.Error(err)) return errors.Trace(err) } - defer dr.sessPool.put(ctx) + defer dr.sessPool.put(sctx) - ranges, err := util.LoadDeleteRanges(ctx, math.MaxInt64) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + ranges, err := util.LoadDeleteRanges(ctx, sctx, math.MaxInt64) if err != nil { logutil.BgLogger().Error("[ddl] delRange emulator load tasks failed", zap.Error(err)) return errors.Trace(err) } for _, r := range ranges { - if err := dr.doTask(ctx, r); err != nil { + if err := dr.doTask(sctx, r); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator do task failed", zap.Error(err)) return errors.Trace(err) } @@ -174,13 +193,14 @@ func (dr *delRange) doDelRangeWork() error { return nil } -func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { +func (dr *delRange) doTask(sctx sessionctx.Context, r util.DelRangeTask) error { var oldStartKey, newStartKey kv.Key oldStartKey = r.StartKey for { finish := true dr.keys = dr.keys[:0] - err := kv.RunInNewTxn(context.Background(), dr.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, dr.store, false, func(ctx context.Context, txn kv.Transaction) error { if topsqlstate.TopSQLEnabled() { // Only when TiDB run without PD(use unistore as storage for test) will run into here, so just set a mock internal resource tagger. txn.SetOption(kv.ResourceGroupTagger, util.GetInternalResourceGroupTaggerForTopSQL()) @@ -217,7 +237,7 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { return errors.Trace(err) } if finish { - if err := util.CompleteDeleteRange(ctx, r); err != nil { + if err := util.CompleteDeleteRange(sctx, r); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator complete task failed", zap.Error(err)) return errors.Trace(err) } @@ -229,7 +249,7 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { zap.Stringer("endKey", endKey)) break } - if err := util.UpdateDeleteRange(ctx, r, newStartKey, oldStartKey); err != nil { + if err := util.UpdateDeleteRange(sctx, r, newStartKey, oldStartKey); err != nil { logutil.BgLogger().Error("[ddl] delRange emulator update task failed", zap.Error(err)) } oldStartKey = newStartKey @@ -237,25 +257,16 @@ func (dr *delRange) doTask(ctx sessionctx.Context, r util.DelRangeTask) error { return nil } -type elementIDAlloc struct { - id int64 -} - -func (ea *elementIDAlloc) alloc() int64 { - ea.id++ - return ea.id -} - // insertJobIntoDeleteRangeTable parses the job into delete-range arguments, // and inserts a new record into gc_delete_range table. The primary key is // (job ID, element ID), so we ignore key conflict error. 
-func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, job *model.Job) error { +func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, job *model.Job, ea *elementIDAlloc) error { now, err := getNowTSO(sctx) if err != nil { return errors.Trace(err) } - var ea elementIDAlloc + ctx = kv.WithInternalSourceType(ctx, getDDLRequestSource(job)) s := sctx.(sqlexec.SQLExecutor) switch job.Type { case model.ActionDropSchema: @@ -268,7 +279,7 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, if batchEnd > i+batchInsertDeleteRangeSize { batchEnd = i + batchInsertDeleteRangeSize } - if err := doBatchInsert(ctx, s, job.ID, tableIDs[i:batchEnd], now, &ea); err != nil { + if err := doBatchInsert(ctx, s, job.ID, tableIDs[i:batchEnd], now, ea); err != nil { return errors.Trace(err) } } @@ -285,7 +296,8 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, pid := range physicalTableIDs { startKey = tablecodec.EncodeTablePrefix(pid) endKey := tablecodec.EncodeTablePrefix(pid + 1) - if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition ID is %d", pid)); err != nil { + elemID := ea.allocForPhysicalID(pid) + if err := doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("partition ID is %d", pid)); err != nil { return errors.Trace(err) } } @@ -293,7 +305,8 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, } startKey = tablecodec.EncodeTablePrefix(tableID) endKey := tablecodec.EncodeTablePrefix(tableID + 1) - return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) + elemID := ea.allocForPhysicalID(tableID) + return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) case model.ActionDropTablePartition, model.ActionTruncateTablePartition: var physicalTableIDs []int64 if err := job.DecodeArgs(&physicalTableIDs); err != nil { @@ -302,7 +315,8 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, for _, physicalTableID := range physicalTableIDs { startKey := tablecodec.EncodeTablePrefix(physicalTableID) endKey := tablecodec.EncodeTablePrefix(physicalTableID + 1) - if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", physicalTableID)); err != nil { + elemID := ea.allocForPhysicalID(physicalTableID) + if err := doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("partition table ID is %d", physicalTableID)); err != nil { return errors.Trace(err) } } @@ -310,97 +324,67 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, case model.ActionAddIndex, model.ActionAddPrimaryKey: tableID := job.TableID var indexID int64 + var ifExists bool var partitionIDs []int64 - if err := job.DecodeArgs(&indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexID, &ifExists, &partitionIDs); err != nil { return errors.Trace(err) } if len(partitionIDs) > 0 { for _, pid := range partitionIDs { startKey := tablecodec.EncodeTableIndexPrefix(pid, indexID) endKey := tablecodec.EncodeTableIndexPrefix(pid, indexID+1) - if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", pid)); err != nil { + elemID := ea.allocForIndexID(pid, indexID) + if err := doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("partition table ID is %d", 
pid)); err != nil { return errors.Trace(err) } } } else { startKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID) endKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID+1) - return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) + elemID := ea.allocForIndexID(tableID, indexID) + return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("table ID is %d", tableID)) } case model.ActionDropIndex, model.ActionDropPrimaryKey: tableID := job.TableID var indexName interface{} + var ifExists bool var indexID int64 var partitionIDs []int64 - if err := job.DecodeArgs(&indexName, &indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexName, &ifExists, &indexID, &partitionIDs); err != nil { return errors.Trace(err) } if len(partitionIDs) > 0 { for _, pid := range partitionIDs { startKey := tablecodec.EncodeTableIndexPrefix(pid, indexID) endKey := tablecodec.EncodeTableIndexPrefix(pid, indexID+1) - if err := doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("partition table ID is %d", pid)); err != nil { + elemID := ea.allocForIndexID(pid, indexID) + if err := doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("partition table ID is %d", pid)); err != nil { return errors.Trace(err) } } } else { startKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID) endKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID+1) - return doInsert(ctx, s, job.ID, ea.alloc(), startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID)) - } - case model.ActionDropIndexes: - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&[]model.CIStr{}, &[]bool{}, &indexIDs, &partitionIDs); err != nil { - return errors.Trace(err) - } - // Remove data in TiKV. 
- if len(indexIDs) == 0 { - return nil - } - if len(partitionIDs) == 0 { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) - } - for _, pID := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pID, indexIDs, now, &ea); err != nil { - return errors.Trace(err) - } + elemID := ea.allocForIndexID(tableID, indexID) + return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID)) } case model.ActionDropColumn: var colName model.CIStr + var ifExists bool var indexIDs []int64 var partitionIDs []int64 - if err := job.DecodeArgs(&colName, &indexIDs, &partitionIDs); err != nil { - return errors.Trace(err) - } - if len(indexIDs) > 0 { - if len(partitionIDs) > 0 { - for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { - return errors.Trace(err) - } - } - } else { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) - } - } - case model.ActionDropColumns: - var colNames []model.CIStr - var ifExists []bool - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&colNames, &ifExists, &indexIDs, &partitionIDs); err != nil { + if err := job.DecodeArgs(&colName, &ifExists, &indexIDs, &partitionIDs); err != nil { return errors.Trace(err) } if len(indexIDs) > 0 { if len(partitionIDs) > 0 { for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, ea); err != nil { return errors.Trace(err) } } } else { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, ea) } } case model.ActionModifyColumn: @@ -413,10 +397,10 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, return nil } if len(partitionIDs) == 0 { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, &ea) + return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, ea) } for _, pid := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, &ea); err != nil { + if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pid, indexIDs, now, ea); err != nil { return errors.Trace(err) } } @@ -438,7 +422,8 @@ func doBatchDeleteIndiceRange(ctx context.Context, s sqlexec.SQLExecutor, jobID, if i != len(indexIDs)-1 { buf.WriteString(",") } - paramsList = append(paramsList, jobID, ea.alloc(), startKeyEncoded, endKeyEncoded, ts) + elemID := ea.allocForIndexID(tableID, indexID) + paramsList = append(paramsList, jobID, elemID, startKeyEncoded, endKeyEncoded, ts) } _, err := s.ExecuteInternal(ctx, buf.String(), paramsList...) 
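Aside: doBatchDeleteIndiceRange above folds all index ranges of one physical table into a single multi-row internal INSERT, one placeholder group and five parameters per range. A stripped-down sketch of the batching; the helper is illustrative, and the exact table name/prefix is an assumption based on TiDB's gc_delete_range conventions, while the %? placeholder style follows the surrounding code.

package ddlutil

import "strings"

// buildDeleteRangeBatch shows the shape of the statement built above: one
// "(%?, %?, %?, %?, %?)" group per range, and a flat parameter list of
// (job_id, element_id, start_key, end_key, ts).
func buildDeleteRangeBatch(jobID int64, elemIDs []int64, startKeys, endKeys []string, ts uint64) (string, []interface{}) {
	var buf strings.Builder
	buf.WriteString("INSERT IGNORE INTO mysql.gc_delete_range VALUES ")
	params := make([]interface{}, 0, len(elemIDs)*5)
	for i := range elemIDs {
		buf.WriteString("(%?, %?, %?, %?, %?)")
		if i != len(elemIDs)-1 {
			buf.WriteString(",")
		}
		params = append(params, jobID, elemIDs[i], startKeys[i], endKeys[i], ts)
	}
	return buf.String(), params
}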
return errors.Trace(err) @@ -471,7 +456,8 @@ func doBatchInsert(ctx context.Context, s sqlexec.SQLExecutor, jobID int64, tabl if i != len(tableIDs)-1 { buf.WriteString(",") } - paramsList = append(paramsList, jobID, ea.alloc(), startKeyEncoded, endKeyEncoded, ts) + elemID := ea.allocForPhysicalID(tableID) + paramsList = append(paramsList, jobID, elemID, startKeyEncoded, endKeyEncoded, ts) } // set session disk full opt s.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) diff --git a/ddl/delete_range_util.go b/ddl/delete_range_util.go new file mode 100644 index 0000000000000..51c02c52932fe --- /dev/null +++ b/ddl/delete_range_util.go @@ -0,0 +1,50 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +type tblIdxID struct { + physicalID int64 + indexID int64 +} + +type elementIDAlloc struct { + physicalIDs map[int64]int64 + indexIDs map[tblIdxID]int64 +} + +func (e *elementIDAlloc) allocForIndexID(physicalID, indexID int64) int64 { + if e.indexIDs == nil { + e.indexIDs = make(map[tblIdxID]int64) + } + k := tblIdxID{physicalID: physicalID, indexID: indexID} + if id, found := e.indexIDs[k]; found { + return id + } + next := int64(len(e.physicalIDs) + len(e.indexIDs) + 1) + e.indexIDs[k] = next + return next +} + +func (e *elementIDAlloc) allocForPhysicalID(tableID int64) int64 { + if e.physicalIDs == nil { + e.physicalIDs = make(map[int64]int64) + } + if id, found := e.physicalIDs[tableID]; found { + return id + } + next := int64(len(e.physicalIDs) + len(e.indexIDs) + 1) + e.physicalIDs[tableID] = next + return next +} diff --git a/ddl/index.go b/ddl/index.go index 9296d2507275c..4016cee19f59a 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -46,6 +46,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -634,19 +635,23 @@ func doReorgWorkForCreateIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Jo } func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, indexInfo, err := checkDropIndex(t, job) + tblInfo, indexInfo, ifExists, err := checkDropIndex(t, job) if err != nil { + if ifExists && dbterror.ErrCantDropFieldOrKey.Equal(err) { + job.Warning = toTError(err) + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + return ver, nil + } return ver, errors.Trace(err) } if tblInfo.TableCacheStatusType != model.TableCacheStatusDisable { return ver, errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Index")) } - dependentHiddenCols := make([]*model.ColumnInfo, 0) - for _, indexColumn := range indexInfo.Columns { - if tblInfo.Columns[indexColumn.Offset].Hidden { - dependentHiddenCols = append(dependentHiddenCols, tblInfo.Columns[indexColumn.Offset]) - } + if job.MultiSchemaInfo != nil && !job.IsRollingback() && job.MultiSchemaInfo.Revertible { + job.MarkNonRevertible() + job.SchemaState = indexInfo.State + return updateVersionAndTableInfo(d, t, job, tblInfo, false) } 
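Aside: the new elementIDAlloc in ddl/delete_range_util.go replaces the old running counter with IDs keyed by physical table ID or by the (physical ID, index ID) pair. That makes element IDs deterministic: sub-jobs of a multi-schema job can share one allocator under a single job ID, and a retried insertion reproduces the same (job_id, element_id) primary key, so the INSERT IGNORE stays idempotent. An illustrative fragment follows; it would have to sit inside package ddl, since the allocator type is unexported.

package ddl

import "fmt"

// exampleElementIDAlloc is illustrative only, demonstrating the determinism
// of the new allocator shown above.
func exampleElementIDAlloc() {
	var ea elementIDAlloc
	first := ea.allocForPhysicalID(101)              // 1
	idxID := ea.allocForIndexID(101, 5)              // 2
	fmt.Println(ea.allocForPhysicalID(101) == first) // true: the same table maps to the same element ID
	fmt.Println(ea.allocForIndexID(101, 5) == idxID) // true: so a retried INSERT IGNORE is a no-op
	fmt.Println(ea.allocForIndexID(101, 6))          // 3: a new (table, index) pair gets the next ID
}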
originalState := indexInfo.State @@ -675,24 +680,11 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { case model.StateDeleteReorganization: // reorganization -> absent indexInfo.State = model.StateNone - if len(dependentHiddenCols) > 0 { - firstHiddenOffset := dependentHiddenCols[0].Offset - for i := 0; i < len(dependentHiddenCols); i++ { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, firstHiddenOffset) - } - } - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if idx.Name.L != indexInfo.Name.L { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices // Set column index flag. dropIndexColumnFlag(tblInfo, indexInfo) + removeDependentHiddenColumns(tblInfo, indexInfo) + removeIndexInfo(tblInfo, indexInfo) - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(dependentHiddenCols)] failpoint.Inject("mockExceedErrorLimit", func(val failpoint.Value) { if val.(bool) { panic("panic test in cancelling add index") @@ -721,197 +713,75 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { return ver, errors.Trace(err) } -func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, error) { +func removeDependentHiddenColumns(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) { + hiddenColOffs := make([]int, 0) + for _, indexColumn := range idxInfo.Columns { + col := tblInfo.Columns[indexColumn.Offset] + if col.Hidden { + hiddenColOffs = append(hiddenColOffs, col.Offset) + } + } + // Sort the offset in descending order. + slices.SortFunc(hiddenColOffs, func(a, b int) bool { return a > b }) + // Move all the dependent hidden columns to the end. + endOffset := len(tblInfo.Columns) - 1 + for _, offset := range hiddenColOffs { + tblInfo.MoveColumnInfo(offset, endOffset) + } + tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(hiddenColOffs)] +} + +func removeIndexInfo(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) { + indices := tblInfo.Indices + offset := -1 + for i, idx := range indices { + if idxInfo.ID == idx.ID { + offset = i + break + } + } + if offset == -1 { + // The target index has been removed. + return + } + // Remove the target index. + tblInfo.Indices = append(tblInfo.Indices[:offset], tblInfo.Indices[offset+1:]...) +} + +func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, bool /* ifExists */, error) { schemaID := job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { - return nil, nil, errors.Trace(err) + return nil, nil, false, errors.Trace(err) } var indexName model.CIStr - if err = job.DecodeArgs(&indexName); err != nil { + var ifExists bool + if err = job.DecodeArgs(&indexName, &ifExists); err != nil { job.State = model.JobStateCancelled - return nil, nil, errors.Trace(err) + return nil, nil, false, errors.Trace(err) } indexInfo := tblInfo.FindIndexByName(indexName.L) if indexInfo == nil { job.State = model.JobStateCancelled - return nil, nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) + return nil, nil, ifExists, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) } // Double check for drop index on auto_increment column. 
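Aside: removeDependentHiddenColumns above sorts the hidden-column offsets in descending order before calling MoveColumnInfo, because moving a column to the tail shifts every later column one slot to the left; handling the highest offset first keeps the still-unprocessed (lower) offsets pointing at the right columns until the final truncation. A plain-slice sketch of the same idea, where moveToEnd stands in for model.TableInfo.MoveColumnInfo and the helper names are invented.

package ddlutil

import "golang.org/x/exp/slices"

// dropAtOffsets removes the elements at the given offsets by first moving them
// to the tail (highest offset first) and then truncating, mirroring
// removeDependentHiddenColumns.
func dropAtOffsets(cols []string, offs []int) []string {
	slices.SortFunc(offs, func(a, b int) bool { return a > b }) // descending, so earlier moves never shift a column an unprocessed offset still refers to
	end := len(cols) - 1
	for _, off := range offs {
		moveToEnd(cols, off, end)
	}
	return cols[:len(cols)-len(offs)]
}

func moveToEnd(cols []string, from, to int) {
	v := cols[from]
	copy(cols[from:to], cols[from+1:to+1])
	cols[to] = v
}

For example, dropAtOffsets([]string{"a", "b", "h1", "c", "h2", "d"}, []int{2, 4}) yields [a b c d]; processing the offsets in ascending order instead would drop "d" by mistake once the first move had shifted it.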
err = checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo) if err != nil { job.State = model.JobStateCancelled - return nil, nil, autoid.ErrWrongAutoKey + return nil, nil, false, autoid.ErrWrongAutoKey } // Check that drop primary index will not cause invisible implicit primary index. if err := checkInvisibleIndexesOnPK(tblInfo, []*model.IndexInfo{indexInfo}, job); err != nil { - return nil, nil, errors.Trace(err) - } - - return tblInfo, indexInfo, nil -} - -func onDropIndexes(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, indexNames, ifExists, err := getSchemaInfos(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if tblInfo.TableCacheStatusType != model.TableCacheStatusDisable { - return ver, errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Indexes")) - } - - indexInfos, err := checkDropIndexes(tblInfo, job, indexNames, ifExists) - if err != nil { - return ver, errors.Trace(err) - } - - if len(indexInfos) == 0 { job.State = model.JobStateCancelled - return ver, nil + return nil, nil, false, errors.Trace(err) } - dependentHiddenCols := make([]*model.ColumnInfo, 0) - for _, indexInfo := range indexInfos { - for _, indexColumn := range indexInfo.Columns { - if tblInfo.Columns[indexColumn.Offset].Hidden { - dependentHiddenCols = append(dependentHiddenCols, tblInfo.Columns[indexColumn.Offset]) - } - } - } - - originalState := indexInfos[0].State - switch indexInfos[0].State { - case model.StatePublic: - // public -> write only - setIndicesState(indexInfos, model.StateWriteOnly) - setColumnsState(dependentHiddenCols, model.StateWriteOnly) - for _, colInfo := range dependentHiddenCols { - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) - } - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateWriteOnly - case model.StateWriteOnly: - // write only -> delete only - setIndicesState(indexInfos, model.StateDeleteOnly) - setColumnsState(dependentHiddenCols, model.StateDeleteOnly) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteOnly - case model.StateDeleteOnly: - // delete only -> reorganization - setIndicesState(indexInfos, model.StateDeleteReorganization) - setColumnsState(dependentHiddenCols, model.StateDeleteReorganization) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteReorganization - case model.StateDeleteReorganization: - // reorganization -> absent - indexIDs := make([]int64, 0, len(indexInfos)) - indexNames := make(map[string]bool, len(indexInfos)) - for _, indexInfo := range indexInfos { - indexNames[indexInfo.Name.L] = true - indexIDs = append(indexIDs, indexInfo.ID) - } - - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if _, ok := indexNames[idx.Name.L]; !ok { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices - - // Set column index flag. 
- for _, indexInfo := range indexInfos { - dropIndexColumnFlag(tblInfo, indexInfo) - } - - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(dependentHiddenCols)] - - ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != model.StateNone) - if err != nil { - return ver, errors.Trace(err) - } - - job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) - job.Args = append(job.Args, indexIDs, getPartitionIDs(tblInfo)) - default: - err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfos[0].State) - } - - return ver, errors.Trace(err) -} - -func getSchemaInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []model.CIStr, []bool, error) { - schemaID := job.SchemaID - tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, nil, errors.Trace(err) - } - - var indexNames []model.CIStr - var ifExists []bool - if err = job.DecodeArgs(&indexNames, &ifExists); err != nil { - return nil, nil, nil, errors.Trace(err) - } - - return tblInfo, indexNames, ifExists, nil -} - -func checkDropIndexes(tblInfo *model.TableInfo, job *model.Job, indexNames []model.CIStr, ifExists []bool) ([]*model.IndexInfo, error) { - var warnings []*errors.Error - indexInfos := make([]*model.IndexInfo, 0, len(indexNames)) - UniqueIndexNames := make(map[model.CIStr]bool, len(indexNames)) - for i, indexName := range indexNames { - // Double check the index is exists. - indexInfo := tblInfo.FindIndexByName(indexName.L) - if indexInfo == nil { - if ifExists[i] { - warnings = append(warnings, toTError(dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName))) - continue - } - job.State = model.JobStateCancelled - return nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) - } - - // Double check for drop index on auto_increment column. - if err := checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo); err != nil { - job.State = model.JobStateCancelled - return nil, autoid.ErrWrongAutoKey - } - - // Check for dropping duplicate indexes. - if UniqueIndexNames[indexName] { - if !ifExists[i] { - job.State = model.JobStateCancelled - return nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) - } - warnings = append(warnings, toTError(dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName))) - } - UniqueIndexNames[indexName] = true - - indexInfos = append(indexInfos, indexInfo) - } - - // Check that drop primary index will not cause invisible implicit primary index. 
- if err := checkInvisibleIndexesOnPK(tblInfo, indexInfos, job); err != nil { - return nil, errors.Trace(err) - } - - job.MultiSchemaInfo = &model.MultiSchemaInfo{Warnings: warnings} - - return indexInfos, nil + return tblInfo, indexInfo, false, nil } func checkInvisibleIndexesOnPK(tblInfo *model.TableInfo, indexInfos []*model.IndexInfo, job *model.Job) error { @@ -1039,7 +909,8 @@ type baseIndexWorker struct { rowMap map[int64]types.Datum rowDecoder *decoder.RowDecoder - sqlMode mysql.SQLMode + sqlMode mysql.SQLMode + jobContext *JobContext } type addIndexWorker struct { @@ -1052,7 +923,7 @@ type addIndexWorker struct { distinctCheckFlags []bool } -func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *addIndexWorker { +func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *addIndexWorker { index := tables.NewIndex(t.GetPhysicalID(), t.Meta(), indexInfo) rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &addIndexWorker{ @@ -1064,6 +935,7 @@ func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t tab rowMap: make(map[int64]types.Datum, len(decodeColMap)), metricCounter: metrics.BackfillTotalCounter.WithLabelValues("add_idx_rate"), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, }, index: index, } @@ -1312,7 +1184,8 @@ func (w *addIndexWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (taskC }) oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 txn.SetOption(kv.Priority, w.priority) @@ -1497,7 +1370,7 @@ type cleanUpIndexWorker struct { baseIndexWorker } -func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo) *cleanUpIndexWorker { +func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *cleanUpIndexWorker { indexes := make([]table.Index, 0, len(t.Indices())) rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) for _, index := range t.Indices() { @@ -1514,6 +1387,7 @@ func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t rowMap: make(map[int64]types.Datum, len(decodeColMap)), metricCounter: metrics.BackfillTotalCounter.WithLabelValues("cleanup_idx_rate"), sqlMode: reorgInfo.ReorgMeta.SQLMode, + jobContext: jc, }, } } @@ -1526,7 +1400,8 @@ func (w *cleanUpIndexWorker) BackfillDataInTxn(handleRange reorgBackfillTask) (t }) oprStartTime := time.Now() - errInTxn = kv.RunInNewTxn(context.Background(), w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), w.jobContext.ddlJobSourceType()) + errInTxn = kv.RunInNewTxn(ctx, w.sessCtx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { taskCtx.addedCount = 0 taskCtx.scanCount = 0 
txn.SetOption(kv.Priority, w.priority) diff --git a/ddl/index_modify_test.go b/ddl/index_modify_test.go index 3ae2ae16482e0..bec851c44d450 100644 --- a/ddl/index_modify_test.go +++ b/ddl/index_modify_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" testddlutil "github.com/pingcap/tidb/ddl/testutil" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -885,16 +886,18 @@ func testDropIndexesIfExists(t *testing.T, store kv.Storage) { tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Note|1091|index i3 doesn't exist")) // Verify the impact of deletion order when dropping duplicate indexes. - tk.MustGetErrMsg( + tk.MustGetErrCode( "alter table test_drop_indexes_if_exists drop index i2, drop index i2;", - "[ddl:1091]index i2 doesn't exist", + errno.ErrUnsupportedDDLOperation, ) - tk.MustGetErrMsg( + tk.MustGetErrCode( "alter table test_drop_indexes_if_exists drop index if exists i2, drop index i2;", - "[ddl:1091]index i2 doesn't exist", + errno.ErrUnsupportedDDLOperation, + ) + tk.MustGetErrCode( + "alter table test_drop_indexes_if_exists drop index i2, drop index if exists i2;", + errno.ErrUnsupportedDDLOperation, ) - tk.MustExec("alter table test_drop_indexes_if_exists drop index i2, drop index if exists i2;") - tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Note|1091|index i2 doesn't exist")) } func testDropIndexesFromPartitionedTable(t *testing.T, store kv.Storage) { @@ -910,10 +913,12 @@ func testDropIndexesFromPartitionedTable(t *testing.T, store kv.Storage) { } tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i2;") tk.MustExec("alter table test_drop_indexes_from_partitioned_table add index i1(c1)") - tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i1;") + tk.MustGetErrCode("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i1;", + errno.ErrUnsupportedDDLOperation) tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column c2;") tk.MustExec("alter table test_drop_indexes_from_partitioned_table add column c1 int") - tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column if exists c1;") + tk.MustGetErrCode("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column if exists c1;", + errno.ErrUnsupportedDDLOperation) } func TestDropPrimaryKey(t *testing.T) { diff --git a/ddl/integration_test.go b/ddl/integration_test.go index 34edd8dfe34d6..99ba587322f88 100644 --- a/ddl/integration_test.go +++ b/ddl/integration_test.go @@ -68,19 +68,19 @@ func TestDefaultValueInEnum(t *testing.T) { tk.MustExec("use test;") // The value 0x91 should not cause panic. tk.MustExec("create table t(a enum('a', 0x91) charset gbk);") - tk.MustExec("insert into t values (1), (2);") // Use 1-base index to locate the value. - tk.MustQuery("select a from t;").Check(testkit.Rows("a", "")) // 0x91 is truncate. + tk.MustExec("insert into t values (1), (2);") // Use 1-base index to locate the value. + tk.MustQuery("select a from t;").Check(testkit.Rows("a", "?")) // 0x91 is replaced to '?'. tk.MustExec("drop table t;") tk.MustExec("create table t (a enum('a', 0x91)) charset gbk;") // Test for table charset. 
tk.MustExec("insert into t values (1), (2);") - tk.MustQuery("select a from t;").Check(testkit.Rows("a", "")) + tk.MustQuery("select a from t;").Check(testkit.Rows("a", "?")) tk.MustExec("drop table t;") - tk.MustGetErrMsg("create table t(a set('a', 0x91, '') charset gbk);", - "[types:1291]Column 'a' has duplicated value '' in SET") - // Test valid utf-8 string value in enum. Note that the binary literal only can be decoded to utf-8. + tk.MustGetErrMsg("create table t(a set('a', 0x91, '?') charset gbk);", + "[types:1291]Column 'a' has duplicated value '?' in SET") + // Test valid utf-8 string value in enum. tk.MustExec("create table t (a enum('a', 0xE4BDA0E5A5BD) charset gbk);") tk.MustExec("insert into t values (1), (2);") - tk.MustQuery("select a from t;").Check(testkit.Rows("a", "你好")) + tk.MustQuery("select a from t;").Check(testkit.Rows("a", "浣犲ソ")) } func TestDDLStatementsBackFill(t *testing.T) { diff --git a/ddl/multi_schema_change.go b/ddl/multi_schema_change.go index bd0518404530a..20e62d89ed22b 100644 --- a/ddl/multi_schema_change.go +++ b/ddl/multi_schema_change.go @@ -68,8 +68,8 @@ func onMultiSchemaChange(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ve continue } proxyJob := sub.ToProxyJob(job) - ver, err = w.runDDLJob(d, t, proxyJob) - sub.FromProxyJob(proxyJob) + ver, err = w.runDDLJob(d, t, &proxyJob) + sub.FromProxyJob(&proxyJob) return ver, err } // The last rollback/cancelling sub-job is done. @@ -87,8 +87,8 @@ func onMultiSchemaChange(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ve continue } proxyJob := sub.ToProxyJob(job) - ver, err = w.runDDLJob(d, t, proxyJob) - sub.FromProxyJob(proxyJob) + ver, err = w.runDDLJob(d, t, &proxyJob) + sub.FromProxyJob(&proxyJob) handleRevertibleException(job, sub, proxyJob.Error) return ver, err } @@ -107,8 +107,8 @@ func onMultiSchemaChange(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ve } subJobs[i] = *sub proxyJob := sub.ToProxyJob(job) - ver, err = w.runDDLJob(d, t, proxyJob) - sub.FromProxyJob(proxyJob) + ver, err = w.runDDLJob(d, t, &proxyJob) + sub.FromProxyJob(&proxyJob) if err != nil || proxyJob.Error != nil { for j := i - 1; j >= 0; j-- { job.MultiSchemaInfo.SubJobs[j] = &subJobs[j] @@ -129,8 +129,8 @@ func onMultiSchemaChange(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ve continue } proxyJob := sub.ToProxyJob(job) - ver, err = w.runDDLJob(d, t, proxyJob) - sub.FromProxyJob(proxyJob) + ver, err = w.runDDLJob(d, t, &proxyJob) + sub.FromProxyJob(&proxyJob) return ver, err } job.State = model.JobStateDone @@ -183,6 +183,12 @@ func fillMultiSchemaInfo(info *model.MultiSchemaInfo, job *model.Job) (err error if pos != nil && pos.Tp == ast.ColumnPositionAfter { info.PositionColumns = append(info.PositionColumns, pos.RelativeColumn.Name) } + case model.ActionDropColumn: + colName := job.Args[0].(model.CIStr) + info.DropColumns = append(info.DropColumns, colName) + case model.ActionDropIndex, model.ActionDropPrimaryKey: + indexName := job.Args[0].(model.CIStr) + info.DropIndexes = append(info.DropIndexes, indexName) default: return dbterror.ErrRunMultiSchemaChanges } diff --git a/ddl/multi_schema_change_test.go b/ddl/multi_schema_change_test.go index 9e3cae966c43f..73f286a67efd1 100644 --- a/ddl/multi_schema_change_test.go +++ b/ddl/multi_schema_change_test.go @@ -15,10 +15,18 @@ package ddl_test import ( + "context" + "strconv" "testing" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/kv" + 
"github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" ) func TestMultiSchemaChangeAddColumns(t *testing.T) { @@ -26,7 +34,6 @@ func TestMultiSchemaChangeAddColumns(t *testing.T) { defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") - tk.MustExec("set @@global.tidb_enable_change_multi_schema = 1") // Test add multiple columns in multiple specs. tk.MustExec("create table t (a int);") @@ -98,3 +105,321 @@ func TestMultiSchemaChangeAddColumns(t *testing.T) { tk.MustExec("insert into t values (1, 2);") tk.MustGetErrCode("alter table t modify column b double, add column c double as (a + b);", errno.ErrUnsupportedDDLOperation) } + +func TestMultiSchemaChangeAddColumnsCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + originHook := dom.DDL().GetHook() + + tk.MustExec("create table t (a int);") + tk.MustExec("insert into t values (1);") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + // Cancel job when the column 'c' is in write-reorg. + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateWriteReorganization + }) + dom.DDL().SetHook(hook) + sql := "alter table t add column b int default 2, add column c int default 3, add column d int default 4;" + tk.MustGetErrCode(sql, errno.ErrCancelledDDLJob) + dom.DDL().SetHook(originHook) + hook.MustCancelDone(t) + tk.MustQuery("select * from t;").Check(testkit.Rows("1")) +} + +func TestMultiSchemaChangeAddColumnsParallel(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int default 1);") + tk.MustExec("insert into t values ();") + putTheSameDDLJobTwice(t, func() { + tk.MustExec("alter table t add column if not exists b int default 2, " + + "add column if not exists c int default 3;") + tk.MustQuery("show warnings").Check(testkit.Rows( + "Note 1060 Duplicate column name 'b'", + "Note 1060 Duplicate column name 'c'", + )) + }) + tk.MustQuery("select * from t;").Check(testkit.Rows("1 2 3")) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int);") + putTheSameDDLJobTwice(t, func() { + tk.MustGetErrCode("alter table t add column b int, add column c int;", errno.ErrDupFieldName) + }) +} + +func TestMultiSchemaChangeDropColumns(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + // Test drop all columns + tk.MustExec("create table t (a int, b int);") + tk.MustGetErrCode("alter table t drop column a, drop column b;", errno.ErrCantRemoveAllFields) + + // Test drop multiple columns in multiple specs + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int, d int, e int);") + tk.MustExec("insert into t values (1, 2, 3, 4, 5);") + tk.MustExec("alter table t drop column a, drop column d, drop column b;") + tk.MustQuery("select * from t;").Check(testkit.Rows("3 5")) + + // Test drop same column + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, c int default 4);") + tk.MustGetErrCode("alter table t drop column a, drop column a", errno.ErrUnsupportedDDLOperation) + + // Test drop if exists column. 
+ tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop column if exists c, drop column a;") + tk.MustQuery("show warnings;").Check(testkit.Rows("Note 1091 Can't DROP 'c'; check that column/key exists")) + tk.MustQuery("select * from t;").Check(testkit.Rows("2")) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop column a, drop column if exists d, drop column c;") + tk.MustQuery("show warnings;").Check(testkit.Rows("Note 1091 Can't DROP 'd'; check that column/key exists")) + tk.MustQuery("select * from t;").Check(testkit.Rows("2")) +} + +func TestMultiSchemaChangeDropColumnsCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + originHook := dom.DDL().GetHook() + + // Test for cancelling the job in a middle state. + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, d int default 4);") + tk.MustExec("insert into t values ();") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + // Cancel job when the column 'a' is in delete-reorg. + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateDeleteReorganization + }) + dom.DDL().SetHook(hook) + tk.MustExec("alter table t drop column b, drop column a, drop column d;") + dom.DDL().SetHook(originHook) + hook.MustCancelFailed(t) + tk.MustQuery("select * from t;").Check(testkit.Rows("3")) + + // Test for cancelling the job in public. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, d int default 4);") + tk.MustExec("insert into t values ();") + hook = newCancelJobHook(store, dom, func(job *model.Job) bool { + // Cancel job when the column 'a' is in public. + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StatePublic + }) + dom.DDL().SetHook(hook) + tk.MustGetErrCode("alter table t drop column b, drop column a, drop column d;", errno.ErrCancelledDDLJob) + dom.DDL().SetHook(originHook) + hook.MustCancelDone(t) + tk.MustQuery("select * from t;").Check(testkit.Rows("1 2 3 4")) +} + +func TestMultiSchemaChangeDropIndexedColumnsCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + originHook := dom.DDL().GetHook() + + // Test for cancelling the job in a middle state. + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, d int default 4, " + + "index(a), index(b), index(c), index(d));") + tk.MustExec("insert into t values ();") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + // Cancel job when the column 'a' is in delete-reorg. 
+ return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateDeleteReorganization + }) + dom.DDL().SetHook(hook) + tk.MustExec("alter table t drop column b, drop column a, drop column d;") + dom.DDL().SetHook(originHook) + hook.MustCancelFailed(t) + tk.MustQuery("select * from t;").Check(testkit.Rows("3")) +} + +func TestMultiSchemaChangeDropColumnsParallel(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, b int, c int);") + putTheSameDDLJobTwice(t, func() { + tk.MustExec("alter table t drop column if exists b, drop column if exists c;") + tk.MustQuery("show warnings").Check(testkit.Rows( + "Note 1091 column b doesn't exist", + "Note 1091 column c doesn't exist")) + }) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int);") + putTheSameDDLJobTwice(t, func() { + tk.MustGetErrCode("alter table t drop column b, drop column a;", errno.ErrCantDropFieldOrKey) + }) +} + +func TestMultiSchemaChangeAddDropColumns(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + // [a, b] -> [+c, -a, +d, -b] -> [c, d] + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t add column c int default 3, drop column a, add column d int default 4, drop column b;") + tk.MustQuery("select * from t;").Check(testkit.Rows("3 4")) + + // [a, b] -> [-a, -b, +c, +d] -> [c, d] + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop column a, drop column b, add column c int default 3, add column d int default 4;") + tk.MustQuery("select * from t;").Check(testkit.Rows("3 4")) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustGetErrCode("alter table t add column c int default 3 after a, add column d int default 4 first, drop column a, drop column b;", errno.ErrUnsupportedDDLOperation) +} + +func TestMultiSchemaChangeDropIndexes(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + // Test drop same index. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int, index t(a));") + tk.MustGetErrCode("alter table t drop index t, drop index t", errno.ErrUnsupportedDDLOperation) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id int, c1 int, c2 int, primary key(id) nonclustered, key i1(c1), key i2(c2), key i3(c1, c2));") + tk.MustExec("insert into t values (1, 2, 3);") + tk.MustExec("alter table t drop index i1, drop index i2;") + tk.MustGetErrCode("select * from t use index(i1);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index(i2);", errno.ErrKeyDoesNotExist) + tk.MustExec("alter table t drop index i3, drop primary key;") + tk.MustGetErrCode("select * from t use index(primary);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index(i3);", errno.ErrKeyDoesNotExist) + + // Test drop index with drop column. 
+ tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, index t(a))") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop index t, drop column a") + tk.MustGetErrCode("select * from t force index(t)", errno.ErrKeyDoesNotExist) +} + +func TestMultiSchemaChangeDropIndexesCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + originHook := dom.DDL().GetHook() + + // Test for cancelling the job in a middle state. + tk.MustExec("create table t (a int, b int, index(a), unique index(b), index idx(a, b));") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateDeleteOnly + }) + dom.DDL().SetHook(hook) + tk.MustExec("alter table t drop index a, drop index b, drop index idx;") + dom.DDL().SetHook(originHook) + hook.MustCancelFailed(t) + tk.MustGetErrCode("select * from t use index (a);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index (b);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index (idx);", errno.ErrKeyDoesNotExist) + + // Test for cancelling the job in none state. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, index(a), unique index(b), index idx(a, b));") + hook = newCancelJobHook(store, dom, func(job *model.Job) bool { + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StatePublic + }) + dom.DDL().SetHook(hook) + tk.MustGetErrCode("alter table t drop index a, drop index b, drop index idx;", errno.ErrCancelledDDLJob) + dom.DDL().SetHook(originHook) + hook.MustCancelDone(t) + tk.MustQuery("select * from t use index (a);").Check(testkit.Rows()) + tk.MustQuery("select * from t use index (b);").Check(testkit.Rows()) + tk.MustQuery("select * from t use index (idx);").Check(testkit.Rows()) +} + +func TestMultiSchemaChangeDropIndexesParallel(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, b int, c int, index(a), index(b), index(c));") + putTheSameDDLJobTwice(t, func() { + tk.MustExec("alter table t drop index if exists b, drop index if exists c;") + tk.MustQuery("show warnings").Check(testkit.Rows( + "Note 1091 index b doesn't exist", + "Note 1091 index c doesn't exist")) + }) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int, index (a), index(b), index(c));") + putTheSameDDLJobTwice(t, func() { + tk.MustGetErrCode("alter table t drop index b, drop index a;", errno.ErrCantDropFieldOrKey) + }) +} + +type cancelOnceHook struct { + store kv.Storage + triggered bool + cancelErr error + pred func(job *model.Job) bool + + ddl.TestDDLCallback +} + +func (c *cancelOnceHook) OnJobUpdated(job *model.Job) { + if c.triggered || !c.pred(job) { + return + } + c.triggered = true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + c.cancelErr = kv.RunInNewTxn(ctx, c.store, false, + func(ctx context.Context, txn kv.Transaction) error { + errs, err := ddl.CancelJobs(txn, []int64{job.ID}) + if errs[0] != nil { + return errs[0] + } + return err + }) +} + +func (c *cancelOnceHook) MustCancelDone(t *testing.T) { + require.True(t, c.triggered) + require.NoError(t, c.cancelErr) +} + +func (c *cancelOnceHook) MustCancelFailed(t *testing.T) { + 
require.True(t, c.triggered) + require.Contains(t, c.cancelErr.Error(), strconv.Itoa(errno.ErrCannotCancelDDLJob)) +} + +func newCancelJobHook(store kv.Storage, dom *domain.Domain, + pred func(job *model.Job) bool) *cancelOnceHook { + return &cancelOnceHook{ + store: store, + pred: pred, + TestDDLCallback: ddl.TestDDLCallback{Do: dom}, + } +} + +func putTheSameDDLJobTwice(t *testing.T, fn func()) { + err := failpoint.Enable("github.com/pingcap/tidb/ddl/mockParallelSameDDLJobTwice", `return(true)`) + require.NoError(t, err) + fn() + err = failpoint.Disable("github.com/pingcap/tidb/ddl/mockParallelSameDDLJobTwice") + require.NoError(t, err) +} diff --git a/ddl/placement/BUILD.bazel b/ddl/placement/BUILD.bazel index f9ece52b485fc..d5a8f75644611 100644 --- a/ddl/placement/BUILD.bazel +++ b/ddl/placement/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "//util/codec", "@com_github_pingcap_failpoint//:failpoint", "@in_gopkg_yaml_v2//:yaml_v2", + "@org_golang_x_exp//slices", ], ) diff --git a/ddl/placement/bundle.go b/ddl/placement/bundle.go index 342ffbd4dac3a..5b53bb3758109 100644 --- a/ddl/placement/bundle.go +++ b/ddl/placement/bundle.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" + "golang.org/x/exp/slices" ) // Refer to https://github.com/tikv/pd/issues/2701 . @@ -161,7 +162,7 @@ func NewBundleFromSugarOptions(options *model.PlacementSettings) (*Bundle, error } // regions must include the primary - sort.Strings(regions) + slices.Sort(regions) primaryIndex := sort.SearchStrings(regions, primaryRegion) if primaryIndex >= len(regions) || regions[primaryIndex] != primaryRegion { return nil, fmt.Errorf("%w: primary region must be included in regions", ErrInvalidPlacementOptions) diff --git a/ddl/placement/meta_bundle_test.go b/ddl/placement/meta_bundle_test.go index f53599bd2e14c..4a2d6f645b136 100644 --- a/ddl/placement/meta_bundle_test.go +++ b/ddl/placement/meta_bundle_test.go @@ -128,7 +128,8 @@ func createMetaBundleSuite() *metaBundleSuite { } func (s *metaBundleSuite) prepareMeta(t *testing.T, store kv.Storage) { - err := kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, m.CreatePolicy(s.policy1)) require.NoError(t, m.CreatePolicy(s.policy2)) @@ -145,7 +146,8 @@ func TestNewTableBundle(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) // tbl1 @@ -180,7 +182,8 @@ func TestNewPartitionBundle(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) // tbl1.par0 @@ -205,7 +208,8 @@ func TestNewPartitionListBundles(t *testing.T) { s := createMetaBundleSuite() 
s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) bundles, err := placement.NewPartitionListBundles(m, s.tbl1.Partition.Definitions) @@ -236,7 +240,8 @@ func TestNewFullTableBundles(t *testing.T) { s := createMetaBundleSuite() s.prepareMeta(t, store) - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) bundles, err := placement.NewFullTableBundles(m, s.tbl1) diff --git a/ddl/placement_policy_ddl_test.go b/ddl/placement_policy_ddl_test.go index 6525ad6a2b092..48203b9c317f5 100644 --- a/ddl/placement_policy_ddl_test.go +++ b/ddl/placement_policy_ddl_test.go @@ -127,9 +127,10 @@ func TestPlacementPolicyInUse(t *testing.T) { require.NoError(t, err) is := builder.Build() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for _, policy := range []*model.PolicyInfo{p1, p2, p4, p5} { require.True(t, dbterror.ErrPlacementPolicyInUse.Equal(ddl.CheckPlacementPolicyNotInUseFromInfoSchema(is, policy))) - require.NoError(t, kv.RunInNewTxn(context.Background(), sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { + require.NoError(t, kv.RunInNewTxn(ctx, sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.True(t, dbterror.ErrPlacementPolicyInUse.Equal(ddl.CheckPlacementPolicyNotInUseFromMeta(m, policy))) return nil @@ -137,7 +138,7 @@ func TestPlacementPolicyInUse(t *testing.T) { } require.NoError(t, ddl.CheckPlacementPolicyNotInUseFromInfoSchema(is, p3)) - require.NoError(t, kv.RunInNewTxn(context.Background(), sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { + require.NoError(t, kv.RunInNewTxn(ctx, sctx.GetStore(), false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) require.NoError(t, ddl.CheckPlacementPolicyNotInUseFromMeta(m, p3)) return nil diff --git a/ddl/placement_policy_test.go b/ddl/placement_policy_test.go index b4b71e76cd422..3c4c1b4cd3109 100644 --- a/ddl/placement_policy_test.go +++ b/ddl/placement_policy_test.go @@ -44,7 +44,8 @@ func checkExistTableBundlesInPD(t *testing.T, do *domain.Domain, dbName string, tblInfo, err := do.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tbName)) require.NoError(t, err) - require.NoError(t, kv.RunInNewTxn(context.TODO(), do.Store(), false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, do.Store(), false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) checkTableBundlesInPD(t, do, tt, tblInfo.Meta()) return nil @@ -321,7 +322,8 @@ func testGetPolicyByIDFromMeta(t *testing.T, store kv.Storage, policyID int64) * policyInfo *model.PolicyInfo err error ) - err1 := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err1 := 
kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) policyInfo, err = t.GetPolicy(policyID) if err != nil { @@ -870,7 +872,8 @@ func testGetPolicyByName(t *testing.T, ctx sessionctx.Context, name string, must func testGetPolicyDependency(storage kv.Storage, name string) []int64 { ids := make([]int64, 0, 32) - err1 := kv.RunInNewTxn(context.Background(), storage, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err1 := kv.RunInNewTxn(ctx, storage, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) dbs, err := t.ListDatabases() if err != nil { diff --git a/ddl/reorg.go b/ddl/reorg.go index b1437972976ed..f3c87213c684d 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -458,6 +458,8 @@ func (dc *ddlCtx) buildDescTableScan(ctx *JobContext, startTS uint64, tbl table. builder.Request.ResourceGroupTagger = ctx.getResourceGroupTaggerForTopSQL() builder.Request.NotFillCache = true builder.Request.Priority = kv.PriorityLow + builder.RequestSource.RequestSourceInternal = true + builder.RequestSource.RequestSourceType = ctx.ddlJobSourceType() kvReq, err := builder.Build() if err != nil { @@ -730,7 +732,8 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key) error { return nil } - err := kv.RunInNewTxn(context.Background(), r.d.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, r.d.store, true, func(ctx context.Context, txn kv.Transaction) error { rh := newReorgHandler(meta.NewMeta(txn)) return errors.Trace(rh.UpdateDDLReorgHandle(r.Job, startKey, r.EndKey, r.PhysicalTableID, r.currElement)) }) diff --git a/ddl/reorg_test.go b/ddl/reorg_test.go index e365a0e2d1a7e..e6006f53a6f2d 100644 --- a/ddl/reorg_test.go +++ b/ddl/reorg_test.go @@ -35,7 +35,7 @@ func TestReorgOwner(t *testing.T) { d1 := domain.DDL() - ctx := testkit.NewTestKit(t, store).Session() + sctx := testkit.NewTestKit(t, store).Session() require.True(t, d1.OwnerManager().IsOwner()) @@ -60,23 +60,23 @@ func TestReorgOwner(t *testing.T) { dbInfo, err := testSchemaInfo(store, "test_reorg") require.NoError(t, err) - testCreateSchema(t, ctx, d1, dbInfo) + testCreateSchema(t, sctx, d1, dbInfo) tblInfo, err := testTableInfo(store, "t", 3) require.NoError(t, err) - testCreateTable(t, ctx, d1, dbInfo, tblInfo) + testCreateTable(t, sctx, d1, dbInfo, tblInfo) tbl, err := testGetTableWithError(store, dbInfo.ID, tblInfo.ID) require.NoError(t, err) num := 10 - ctx = testkit.NewTestKit(t, store).Session() - err = sessiontxn.NewTxn(context.Background(), ctx) + sctx = testkit.NewTestKit(t, store).Session() + err = sessiontxn.NewTxn(context.Background(), sctx) require.NoError(t, err) for i := 0; i < num; i++ { - _, err := tbl.AddRecord(ctx, types.MakeDatums(i, i, i)) + _, err := tbl.AddRecord(sctx, types.MakeDatums(i, i, i)) require.NoError(t, err) } - require.NoError(t, ctx.CommitTxn(context.Background())) + require.NoError(t, sctx.CommitTxn(context.Background())) tc := &ddl.TestDDLCallback{} tc.OnJobRunBeforeExported = func(job *model.Job) { @@ -88,9 +88,10 @@ func TestReorgOwner(t *testing.T) { d1.SetHook(tc) - testDropSchema(t, ctx, d1, dbInfo) + testDropSchema(t, sctx, d1, dbInfo) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), 
kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) db, err1 := m.GetDatabase(dbInfo.ID) require.NoError(t, err1) diff --git a/ddl/restart_test.go b/ddl/restart_test.go index fc4334a2e1b85..bbec12b40ca6f 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -287,7 +287,8 @@ func testTableInfo(d *ddl, name string, num int) (*model.TableInfo, error) { } func testCheckTableState(t *testing.T, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) { - require.NoError(t, kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) info, err := m.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) diff --git a/ddl/rollingback.go b/ddl/rollingback.go index b1ea2da7c7592..e13a73742bc89 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -60,8 +60,8 @@ func convertAddIdxJob2RollbackJob(d *ddlCtx, t *meta.Meta, job *model.Job, tblIn } } - // the second args will be used in onDropIndex. - job.Args = []interface{}{indexInfo.Name, getPartitionIDs(tblInfo)} + // the second and the third args will be used in onDropIndex. + job.Args = []interface{}{indexInfo.Name, false /* ifExists */, getPartitionIDs(tblInfo)} // If add index job rollbacks in write reorganization state, its need to delete all keys which has been added. // Its work is the same as drop index job do. // The write reorganization state in add index job that likes write only state in drop index job. @@ -175,36 +175,8 @@ func rollingbackAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, e return ver, dbterror.ErrCancelledDDLJob } -func rollingbackAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { - tblInfo, columnInfos, _, _, _, _, err := checkAddColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if len(columnInfos) == 0 { - job.State = model.JobStateCancelled - return ver, dbterror.ErrCancelledDDLJob - } - - colNames := make([]model.CIStr, len(columnInfos)) - originalState := columnInfos[0].State - for i, columnInfo := range columnInfos { - columnInfos[i].State = model.StateDeleteOnly - colNames[i] = columnInfo.Name - } - ifExists := make([]bool, len(columnInfos)) - - job.SchemaState = model.StateDeleteOnly - job.Args = []interface{}{colNames, ifExists} - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != columnInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.State = model.JobStateRollingback - return ver, dbterror.ErrCancelledDDLJob -} - func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, colInfo, idxInfos, err := checkDropColumn(t, job) + _, colInfo, idxInfos, _, err := checkDropColumn(t, job) if err != nil { return ver, errors.Trace(err) } @@ -235,40 +207,8 @@ func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) return ver, nil } -func rollingbackDropColumns(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, colInfos, _, idxInfos, err := checkDropColumns(t, job) - if err != nil { - return ver, errors.Trace(err) - } - - for _, indexInfo := range idxInfos { - switch indexInfo.State { - case model.StateWriteOnly, model.StateDeleteOnly, model.StateDeleteReorganization, model.StateNone: - 
// We can not rollback now, so just continue to drop index. - // In function isJobRollbackable will let job rollback when state is StateNone. - // When there is no index related to the drop columns job it is OK, but when there has indices, we should - // make sure the job is not rollback. - job.State = model.JobStateRunning - return ver, nil - case model.StatePublic: - default: - return ver, dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State) - } - } - - // StatePublic means when the job is not running yet. - if colInfos[0].State == model.StatePublic { - job.State = model.JobStateCancelled - return ver, dbterror.ErrCancelledDDLJob - } - // In the state of drop columns `write only -> delete only -> reorganization`, - // We can not rollback now, so just continue to drop columns. - job.State = model.JobStateRunning - return ver, nil -} - func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, indexInfo, err := checkDropIndex(t, job) + _, indexInfo, _, err := checkDropIndex(t, job) if err != nil { return ver, errors.Trace(err) } @@ -287,43 +227,6 @@ func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { } } -func rollingbackDropIndexes(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { - tblInfo, indexNames, ifExists, err := getSchemaInfos(t, job) - if err != nil { - return ver, errors.Trace(err) - } - - indexInfos, err := checkDropIndexes(tblInfo, job, indexNames, ifExists) - if err != nil { - return ver, errors.Trace(err) - } - - indexInfo := indexInfos[0] - originalState := indexInfo.State - switch indexInfo.State { - case model.StateWriteOnly, model.StateDeleteOnly, model.StateDeleteReorganization, model.StateNone: - // We can not rollback now, so just continue to drop index. - // Normally won't fetch here, because there is a check when canceling DDL jobs. See function: IsRollbackable. - job.State = model.JobStateRunning - return ver, nil - case model.StatePublic: - job.State = model.JobStateRollbackDone - for _, indexInfo := range indexInfos { - indexInfo.State = model.StatePublic - } - default: - return ver, dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State) - } - - job.SchemaState = indexInfo.State - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfo.State) - if err != nil { - return ver, errors.Trace(err) - } - job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo) - return ver, dbterror.ErrCancelledDDLJob -} - func rollingbackAddIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, isPK bool) (ver int64, err error) { // If the value of SnapshotVer isn't zero, it means the work is backfilling the indexes. 
if job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 { @@ -444,8 +347,6 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) switch job.Type { case model.ActionAddColumn: ver, err = rollingbackAddColumn(d, t, job) - case model.ActionAddColumns: - ver, err = rollingbackAddColumns(d, t, job) case model.ActionAddIndex: ver, err = rollingbackAddIndex(w, d, t, job, false) case model.ActionAddPrimaryKey: @@ -454,12 +355,8 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) ver, err = rollingbackAddTablePartition(d, t, job) case model.ActionDropColumn: ver, err = rollingbackDropColumn(t, job) - case model.ActionDropColumns: - ver, err = rollingbackDropColumns(t, job) case model.ActionDropIndex, model.ActionDropPrimaryKey: ver, err = rollingbackDropIndex(t, job) - case model.ActionDropIndexes: - ver, err = rollingbackDropIndexes(d, t, job) case model.ActionDropTable, model.ActionDropView, model.ActionDropSequence: err = rollingbackDropTableOrView(t, job) case model.ActionDropTablePartition: diff --git a/ddl/sanity_check.go b/ddl/sanity_check.go index ab7b01bd89132..9f0f540b20793 100644 --- a/ddl/sanity_check.go +++ b/ddl/sanity_check.go @@ -20,53 +20,51 @@ import ( "fmt" "strings" + "github.com/pingcap/errors" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/sqlexec" + "go.uber.org/zap" ) -func checkRangeCntByTableIDs(physicalTableIDs []int64, cnt int64) { - if len(physicalTableIDs) > 0 { - if len(physicalTableIDs) != int(cnt) { - panic("should not happened" + fmt.Sprintf("expect count: %d, real count: %d", len(physicalTableIDs), cnt)) +func (d *ddl) checkDeleteRangeCnt(job *model.Job) { + actualCnt, err := queryDeleteRangeCnt(d.sessPool, job.ID) + if err != nil { + if strings.Contains(err.Error(), "Not Supported") { + return // For mock session, we don't support executing SQLs. } - } else if cnt != 1 { - panic("should not happened" + fmt.Sprintf("expect count: %d, real count: %d", 1, cnt)) - } -} - -func checkRangeCntByTableIDsAndIndexIDs(partitionTableIDs []int64, indexIDs []int64, cnt int64) { - if len(indexIDs) == 0 { - return + logutil.BgLogger().Error("query delete range count failed", zap.Error(err)) + panic(err) } - expectedCnt := len(indexIDs) - if len(partitionTableIDs) > 0 { - expectedCnt *= len(partitionTableIDs) + expectedCnt, err := expectedDeleteRangeCnt(job) + if err != nil { + logutil.BgLogger().Error("decode job's delete range count failed", zap.Error(err)) + panic(err) } - if expectedCnt != int(cnt) { - panic("should not happened" + fmt.Sprintf("expect count: %d, real count: %d", expectedCnt, cnt)) + if actualCnt != expectedCnt { + panic(fmt.Sprintf("expect delete range count %d, actual count %d", expectedCnt, actualCnt)) } } -func (d *ddl) checkDeleteRangeCnt(job *model.Job) { - sctx, _ := d.sessPool.get() +func queryDeleteRangeCnt(sessPool *sessionPool, jobID int64) (int, error) { + sctx, _ := sessPool.get() s, _ := sctx.(sqlexec.SQLExecutor) defer func() { - d.sessPool.put(sctx) + sessPool.put(sctx) }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) query := `select sum(cnt) from (select count(1) cnt from mysql.gc_delete_range where job_id = %? union all select count(1) cnt from mysql.gc_delete_range_done where job_id = %?) 
as gdr;` - rs, err := s.ExecuteInternal(context.TODO(), query, job.ID, job.ID) + rs, err := s.ExecuteInternal(ctx, query, jobID, jobID) if err != nil { - if strings.Contains(err.Error(), "Not Supported") { - return - } - panic(err) + return 0, errors.Trace(err) } defer func() { _ = rs.Close() @@ -74,82 +72,82 @@ func (d *ddl) checkDeleteRangeCnt(job *model.Job) { req := rs.NewChunk(nil) err = rs.Next(context.TODO(), req) if err != nil { - panic("should not happened, err:" + err.Error()) + return 0, errors.Trace(err) } cnt, _ := req.GetRow(0).GetMyDecimal(0).ToInt() + return int(cnt), nil +} +func expectedDeleteRangeCnt(job *model.Job) (int, error) { switch job.Type { case model.ActionDropSchema: var tableIDs []int64 if err := job.DecodeArgs(&tableIDs); err != nil { - panic("should not happened") - } - if len(tableIDs) != int(cnt) { - panic("should not happened" + fmt.Sprintf("expect count: %d, real count: %d", len(tableIDs), cnt)) + return 0, errors.Trace(err) } + return len(tableIDs), nil case model.ActionDropTable, model.ActionTruncateTable: var startKey kv.Key var physicalTableIDs []int64 var ruleIDs []string if err := job.DecodeArgs(&startKey, &physicalTableIDs, &ruleIDs); err != nil { - panic("Error in drop/truncate table, please report a bug with this stack trace and how it happened") + return 0, errors.Trace(err) } - checkRangeCntByTableIDs(physicalTableIDs, cnt) + return mathutil.Max(len(physicalTableIDs), 1), nil case model.ActionDropTablePartition, model.ActionTruncateTablePartition: var physicalTableIDs []int64 if err := job.DecodeArgs(&physicalTableIDs); err != nil { - panic("should not happened") - } - if len(physicalTableIDs) != int(cnt) { - panic("should not happened" + fmt.Sprintf("expect count: %d, real count: %d", len(physicalTableIDs), cnt)) + return 0, errors.Trace(err) } + return len(physicalTableIDs), nil case model.ActionAddIndex, model.ActionAddPrimaryKey: var indexID int64 + var ifExists bool var partitionIDs []int64 - if err := job.DecodeArgs(&indexID, &partitionIDs); err != nil { - panic("should not happened") + if err := job.DecodeArgs(&indexID, &ifExists, &partitionIDs); err != nil { + return 0, errors.Trace(err) } - checkRangeCntByTableIDs(partitionIDs, cnt) + return mathutil.Max(len(partitionIDs), 1), nil case model.ActionDropIndex, model.ActionDropPrimaryKey: var indexName interface{} + var ifNotExists bool var indexID int64 var partitionIDs []int64 - if err := job.DecodeArgs(&indexName, &indexID, &partitionIDs); err != nil { - panic("should not happened") - } - checkRangeCntByTableIDsAndIndexIDs(partitionIDs, []int64{indexID}, cnt) - case model.ActionDropIndexes: - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&[]model.CIStr{}, &[]bool{}, &indexIDs, &partitionIDs); err != nil { - panic("should not happened") + if err := job.DecodeArgs(&indexName, &ifNotExists, &indexID, &partitionIDs); err != nil { + return 0, errors.Trace(err) } - checkRangeCntByTableIDsAndIndexIDs(partitionIDs, indexIDs, cnt) + return mathutil.Max(len(partitionIDs), 1), nil case model.ActionDropColumn: var colName model.CIStr + var ifExists bool var indexIDs []int64 var partitionIDs []int64 - if err := job.DecodeArgs(&colName, &indexIDs, &partitionIDs); err != nil { - panic("should not happened") - } - checkRangeCntByTableIDsAndIndexIDs(partitionIDs, indexIDs, cnt) - case model.ActionDropColumns: - var colNames []model.CIStr - var ifExists []bool - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&colNames, &ifExists, &indexIDs, 
&partitionIDs); err != nil { - panic("should not happened") + if err := job.DecodeArgs(&colName, &ifExists, &indexIDs, &partitionIDs); err != nil { + return 0, errors.Trace(err) } - checkRangeCntByTableIDsAndIndexIDs(partitionIDs, indexIDs, cnt) + physicalCnt := mathutil.Max(len(partitionIDs), 1) + return physicalCnt * len(indexIDs), nil case model.ActionModifyColumn: var indexIDs []int64 var partitionIDs []int64 if err := job.DecodeArgs(&indexIDs, &partitionIDs); err != nil { - panic("should not happened") + return 0, errors.Trace(err) + } + physicalCnt := mathutil.Max(len(partitionIDs), 1) + return physicalCnt * len(indexIDs), nil + case model.ActionMultiSchemaChange: + totalExpectedCnt := 0 + for _, sub := range job.MultiSchemaInfo.SubJobs { + p := sub.ToProxyJob(job) + cnt, err := expectedDeleteRangeCnt(&p) + if err != nil { + return 0, err + } + totalExpectedCnt += cnt } - checkRangeCntByTableIDsAndIndexIDs(partitionIDs, indexIDs, cnt) + return totalExpectedCnt, nil } + return 0, nil } // checkHistoryJobInTest does some sanity check to make sure something is correct after DDL complete. diff --git a/ddl/schema_test.go b/ddl/schema_test.go index 58e7996401f0b..cdb071d5e0daf 100644 --- a/ddl/schema_test.go +++ b/ddl/schema_test.go @@ -55,7 +55,8 @@ func testCreateTable(t *testing.T, ctx sessionctx.Context, d ddl.DDL, dbInfo *mo } func testCheckTableState(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) info, err := m.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -105,7 +106,8 @@ func testTableInfo(store kv.Storage, name string, num int) (*model.TableInfo, er func genGlobalIDs(store kv.Storage, count int) ([]int64, error) { var ret []int64 - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) var err error ret, err = m.GenGlobalIDs(count) @@ -175,8 +177,9 @@ func isDDLJobDone(test *testing.T, t *meta.Meta) bool { func testCheckSchemaState(test *testing.T, store kv.Storage, dbInfo *model.DBInfo, state model.SchemaState) { isDropped := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) for { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) info, err := t.GetDatabase(dbInfo.ID) require.NoError(test, err) diff --git a/ddl/serial_test.go b/ddl/serial_test.go index f1359854f7b6e..7d5c2c55a2d89 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/ddl/util" - ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/infoschema" @@ -726,7 +725,7 @@ func TestCancelJobByErrorCountLimit(t *testing.T) { limit := variable.GetDDLErrorCountLimit() 
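To make the counting rule from the sanity_check.go hunk above concrete: for a drop-column job, expectedDeleteRangeCnt returns max(len(partitionIDs), 1) * len(indexIDs), and a multi-schema-change job sums the counts of its sub-jobs after converting each one to a proxy job. A hypothetical worked example follows; the numbers are invented and the local max helper merely stands in for mathutil.Max.

package main

import "fmt"

// max mirrors mathutil.Max for this sketch.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	partitionIDs := []int64{101, 102, 103} // assume the table has 3 partitions
	indexIDs := []int64{7, 9}              // assume the dropped column was covered by 2 indexes
	expected := max(len(partitionIDs), 1) * len(indexIDs)
	fmt.Println(expected) // 6: one delete range per (partition, index) pair
}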
tk.MustExec("set @@global.tidb_ddl_error_count_limit = 16") - err := ddlutil.LoadDDLVars(tk.Session()) + err := util.LoadDDLVars(tk.Session()) require.NoError(t, err) defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %d", limit)) @@ -744,7 +743,7 @@ func TestTruncateTableUpdateSchemaVersionErr(t *testing.T) { limit := variable.GetDDLErrorCountLimit() tk.MustExec("set @@global.tidb_ddl_error_count_limit = 5") - err := ddlutil.LoadDDLVars(tk.Session()) + err := util.LoadDDLVars(tk.Session()) require.NoError(t, err) defer tk.MustExec(fmt.Sprintf("set @@global.tidb_ddl_error_count_limit = %d", limit)) @@ -767,7 +766,8 @@ func TestCanceledJobTakeTime(t *testing.T) { once := sync.Once{} hook.OnJobUpdatedExported = func(job *model.Job) { once.Do(func() { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err := m.GetAutoIDAccessors(job.SchemaID, job.TableID).Del() if err != nil { diff --git a/ddl/split_region.go b/ddl/split_region.go index 87b408df2c3ce..e1ae191812fd1 100644 --- a/ddl/split_region.go +++ b/ddl/split_region.go @@ -32,6 +32,7 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, regionIDs := make([]uint64, 0, len(pi.Definitions)) ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { for _, def := range pi.Definitions { regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatter)...) 
@@ -49,6 +50,7 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) { ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) var regionIDs []uint64 if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatter) diff --git a/ddl/stat.go b/ddl/stat.go index 3105ec7d66c17..24462f9bb141a 100644 --- a/ddl/stat.go +++ b/ddl/stat.go @@ -15,10 +15,7 @@ package ddl import ( - "context" - "github.com/pingcap/errors" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/variable" ) @@ -51,14 +48,12 @@ func (d *ddl) Stats(vars *variable.SessionVars) (map[string]interface{}, error) m[serverID] = d.uuid var ddlInfo *Info - err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { - var err1 error - ddlInfo, err1 = GetDDLInfo(txn) - if err1 != nil { - return errors.Trace(err1) - } - return errors.Trace(err1) - }) + s, err := d.sessPool.get() + if err != nil { + return nil, errors.Trace(err) + } + defer d.sessPool.put(s) + ddlInfo, err = GetDDLInfoWithNewTxn(s) if err != nil { return nil, errors.Trace(err) } diff --git a/ddl/stat_test.go b/ddl/stat_test.go index ca91366e4f0d6..6b40869a18ab9 100644 --- a/ddl/stat_test.go +++ b/ddl/stat_test.go @@ -20,6 +20,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -86,6 +88,66 @@ func TestDDLStatsInfo(t *testing.T) { } } +func TestGetDDLInfo(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + sess := testkit.NewTestKit(t, store).Session() + _, err := sess.Execute(context.Background(), "begin") + require.NoError(t, err) + txn, err := sess.Txn(true) + require.NoError(t, err) + + dbInfo2 := &model.DBInfo{ + ID: 2, + Name: model.NewCIStr("b"), + State: model.StateNone, + } + job := &model.Job{ + ID: 1, + SchemaID: dbInfo2.ID, + Type: model.ActionCreateSchema, + RowCount: 0, + } + job1 := &model.Job{ + ID: 2, + SchemaID: dbInfo2.ID, + Type: model.ActionAddIndex, + RowCount: 0, + } + + err = addDDLJobs(txn, job) + require.NoError(t, err) + + info, err := ddl.GetDDLInfo(sess) + require.NoError(t, err) + require.Len(t, info.Jobs, 1) + require.Equal(t, job, info.Jobs[0]) + require.Nil(t, info.ReorgHandle) + + // two jobs + err = addDDLJobs(txn, job1) + require.NoError(t, err) + + info, err = ddl.GetDDLInfo(sess) + require.NoError(t, err) + require.Len(t, info.Jobs, 2) + require.Equal(t, job, info.Jobs[0]) + require.Equal(t, job1, info.Jobs[1]) + require.Nil(t, info.ReorgHandle) + + _, err = sess.Execute(context.Background(), "rollback") + require.NoError(t, err) +} + +func addDDLJobs(txn kv.Transaction, job *model.Job) error { + m := meta.NewMeta(txn) + if job.MayNeedReorg() { + return m.EnQueueDDLJob(job, meta.AddIndexJobListKey) + } + return m.EnQueueDDLJob(job) +} + func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { return &model.Job{ SchemaID: dbInfo.ID, diff --git a/ddl/table_split_test.go 
b/ddl/table_split_test.go index 299abe09fb686..ad944215a6550 100644 --- a/ddl/table_split_test.go +++ b/ddl/table_split_test.go @@ -26,10 +26,8 @@ import ( "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/testkit" - - "github.com/tikv/client-go/v2/tikv" - "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/tikv" ) func TestTableSplit(t *testing.T) { diff --git a/ddl/table_test.go b/ddl/table_test.go index d3c9480a92470..0a1dff690f5b2 100644 --- a/ddl/table_test.go +++ b/ddl/table_test.go @@ -97,7 +97,8 @@ func testLockTable(t *testing.T, ctx sessionctx.Context, d ddl.DDL, newSchemaID } func checkTableLockedTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo, serverID string, sessionID uint64, lockTp model.TableLockType) { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -138,7 +139,8 @@ func testTruncateTable(t *testing.T, ctx sessionctx.Context, store kv.Storage, d func testGetTableWithError(store kv.Storage, schemaID, tableID int64) (table.Table, error) { var tblInfo *model.TableInfo - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) var err1 error tblInfo, err1 = t.GetTable(schemaID, tableID) @@ -233,7 +235,8 @@ func TestTable(t *testing.T) { } func checkTableCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) @@ -245,7 +248,8 @@ func checkTableCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, t } func checkTableNoCacheTest(t *testing.T, store kv.Storage, dbInfo *model.DBInfo, tblInfo *model.TableInfo) { - require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { tt := meta.NewMeta(txn) info, err := tt.GetTable(dbInfo.ID, tblInfo.ID) require.NoError(t, err) diff --git a/ddl/tiflash_replica_test.go b/ddl/tiflash_replica_test.go index f0ccad6e73aa9..10727aed4abac 100644 --- a/ddl/tiflash_replica_test.go +++ b/ddl/tiflash_replica_test.go @@ -247,7 +247,6 @@ func TestCreateTableWithLike2(t *testing.T) { var onceChecker sync.Map hook.OnJobRunBeforeExported = func(job *model.Job) { if job.Type != model.ActionAddColumn && job.Type != model.ActionDropColumn && - job.Type != model.ActionAddColumns && job.Type != model.ActionDropColumns && job.Type != model.ActionAddIndex && job.Type != model.ActionDropIndex { 
return } @@ -368,7 +367,8 @@ func TestTruncateTable2(t *testing.T) { tablePrefix := tablecodec.EncodeTablePrefix(oldTblID) hasOldTableData := true for i := 0; i < waitForCleanDataRound; i++ { - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { it, err1 := txn.Iter(tablePrefix, nil) if err1 != nil { return err1 diff --git a/ddl/util/util.go b/ddl/util/util.go index d4836a187f832..386b8d71f7e78 100644 --- a/ddl/util/util.go +++ b/ddl/util/util.go @@ -56,17 +56,17 @@ func (t DelRangeTask) Range() (kv.Key, kv.Key) { } // LoadDeleteRanges loads delete range tasks from gc_delete_range table. -func LoadDeleteRanges(ctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { - return loadDeleteRangesFromTable(ctx, deleteRangesTable, safePoint) +func LoadDeleteRanges(ctx context.Context, sctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { + return loadDeleteRangesFromTable(ctx, sctx, deleteRangesTable, safePoint) } // LoadDoneDeleteRanges loads deleted ranges from gc_delete_range_done table. -func LoadDoneDeleteRanges(ctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { - return loadDeleteRangesFromTable(ctx, doneDeleteRangesTable, safePoint) +func LoadDoneDeleteRanges(ctx context.Context, sctx sessionctx.Context, safePoint uint64) (ranges []DelRangeTask, _ error) { + return loadDeleteRangesFromTable(ctx, sctx, doneDeleteRangesTable, safePoint) } -func loadDeleteRangesFromTable(ctx sessionctx.Context, table string, safePoint uint64) (ranges []DelRangeTask, _ error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), loadDeleteRangeSQL, table, safePoint) +func loadDeleteRangesFromTable(ctx context.Context, sctx sessionctx.Context, table string, safePoint uint64) (ranges []DelRangeTask, _ error) { + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, loadDeleteRangeSQL, table, safePoint) if rs != nil { defer terror.Call(rs.Close) } @@ -106,28 +106,30 @@ func loadDeleteRangesFromTable(ctx sessionctx.Context, table string, safePoint u } // CompleteDeleteRange moves a record from gc_delete_range table to gc_delete_range_done table. -func CompleteDeleteRange(ctx sessionctx.Context, dr DelRangeTask) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "BEGIN") +func CompleteDeleteRange(sctx sessionctx.Context, dr DelRangeTask) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "BEGIN") if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), recordDoneDeletedRangeSQL, dr.JobID, dr.ElementID) + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, recordDoneDeletedRangeSQL, dr.JobID, dr.ElementID) if err != nil { return errors.Trace(err) } - err = RemoveFromGCDeleteRange(ctx, dr.JobID, dr.ElementID) + err = RemoveFromGCDeleteRange(sctx, dr.JobID, dr.ElementID) if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "COMMIT") + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "COMMIT") return errors.Trace(err) } // RemoveFromGCDeleteRange is exported for ddl pkg to use. 
-func RemoveFromGCDeleteRange(ctx sessionctx.Context, jobID, elementID int64) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), completeDeleteRangeSQL, jobID, elementID) +func RemoveFromGCDeleteRange(sctx sessionctx.Context, jobID, elementID int64) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, completeDeleteRangeSQL, jobID, elementID) return errors.Trace(err) } @@ -150,16 +152,18 @@ func RemoveMultiFromGCDeleteRange(ctx context.Context, sctx sessionctx.Context, } // DeleteDoneRecord removes a record from gc_delete_range_done table. -func DeleteDoneRecord(ctx sessionctx.Context, dr DelRangeTask) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), deleteDoneRecordSQL, dr.JobID, dr.ElementID) +func DeleteDoneRecord(sctx sessionctx.Context, dr DelRangeTask) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, deleteDoneRecordSQL, dr.JobID, dr.ElementID) return errors.Trace(err) } // UpdateDeleteRange is only for emulator. -func UpdateDeleteRange(ctx sessionctx.Context, dr DelRangeTask, newStartKey, oldStartKey kv.Key) error { +func UpdateDeleteRange(sctx sessionctx.Context, dr DelRangeTask, newStartKey, oldStartKey kv.Key) error { newStartKeyHex := hex.EncodeToString(newStartKey) oldStartKeyHex := hex.EncodeToString(oldStartKey) - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), updateDeleteRangeSQL, newStartKeyHex, dr.JobID, dr.ElementID, oldStartKeyHex) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, updateDeleteRangeSQL, newStartKeyHex, dr.JobID, dr.ElementID, oldStartKeyHex) return errors.Trace(err) } @@ -177,6 +181,7 @@ func LoadDDLVars(ctx sessionctx.Context) error { // LoadGlobalVars loads global variable from mysql.global_variables. 
func LoadGlobalVars(ctx context.Context, sctx sessionctx.Context, varNames []string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) if e, ok := sctx.(sqlexec.RestrictedSQLExecutor); ok { var buf strings.Builder buf.WriteString(loadGlobalVars) diff --git a/distsql/distsql.go b/distsql/distsql.go index 6a502b58478b6..9bc9b9cc323d5 100644 --- a/distsql/distsql.go +++ b/distsql/distsql.go @@ -154,6 +154,8 @@ func SelectWithRuntimeStats(ctx context.Context, sctx sessionctx.Context, kvReq func Analyze(ctx context.Context, client kv.Client, kvReq *kv.Request, vars interface{}, isRestrict bool, stmtCtx *stmtctx.StatementContext) (SelectResult, error) { ctx = WithSQLKvExecCounterInterceptor(ctx, stmtCtx) + kvReq.RequestSource.RequestSourceInternal = true + kvReq.RequestSource.RequestSourceType = kv.InternalTxnStats resp := client.Send(ctx, kvReq, vars, &kv.ClientSendOption{}) if resp == nil { return nil, errors.New("client returns nil response") diff --git a/distsql/request_builder.go b/distsql/request_builder.go index c4840ca8741a3..2bd7c5df65c04 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -267,6 +267,8 @@ func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *Req if sv.EnablePaging { builder.SetPaging(true) } + builder.RequestSource.RequestSourceInternal = sv.InRestrictedSQL + builder.RequestSource.RequestSourceType = sv.RequestSourceType return builder } @@ -309,15 +311,8 @@ func (builder *RequestBuilder) SetResourceGroupTagger(tagger tikvrpc.ResourceGro } func (builder *RequestBuilder) verifyTxnScope() error { - // Stale Read uses the calculated TSO for the read, - // so there is no need to check the TxnScope here. - if builder.IsStaleness { - return nil - } - if builder.ReadReplicaScope == "" { - builder.ReadReplicaScope = kv.GlobalReplicaScope - } - if builder.ReadReplicaScope == kv.GlobalReplicaScope || builder.is == nil { + txnScope := builder.TxnScope + if txnScope == "" || txnScope == kv.GlobalReplicaScope || builder.is == nil { return nil } visitPhysicalTableID := make(map[int64]struct{}) @@ -331,7 +326,7 @@ func (builder *RequestBuilder) verifyTxnScope() error { } for phyTableID := range visitPhysicalTableID { - valid := VerifyTxnScope(builder.ReadReplicaScope, phyTableID, builder.is) + valid := VerifyTxnScope(txnScope, phyTableID, builder.is) if !valid { var tblName string var partName string @@ -343,10 +338,10 @@ func (builder *RequestBuilder) verifyTxnScope() error { tblInfo, _ = builder.is.TableByID(phyTableID) tblName = tblInfo.Meta().Name.String() } - err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.ReadReplicaScope) + err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, txnScope) if len(partName) > 0 { err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope", - tblName, partName, builder.ReadReplicaScope) + tblName, partName, txnScope) } return err } @@ -354,6 +349,12 @@ func (builder *RequestBuilder) verifyTxnScope() error { return nil } +// SetTxnScope sets request TxnScope +func (builder *RequestBuilder) SetTxnScope(scope string) *RequestBuilder { + builder.TxnScope = scope + return builder +} + // SetReadReplicaScope sets request readReplicaScope func (builder *RequestBuilder) SetReadReplicaScope(scope string) *RequestBuilder { builder.ReadReplicaScope = scope diff --git a/distsql/select_result_test.go b/distsql/select_result_test.go index 622e672e1ee9d..c12892083d641 100644 --- a/distsql/select_result_test.go +++ 
b/distsql/select_result_test.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tipb/go-tipb" - "github.com/stretchr/testify/require" ) diff --git a/domain/domain.go b/domain/domain.go index e9eed74e24da6..9a75b0a3a5491 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -326,6 +326,11 @@ func canSkipSchemaCheckerDDL(tp model.ActionType) bool { // InfoSchema gets the latest information schema from domain. func (do *Domain) InfoSchema() infoschema.InfoSchema { + if do.infoCache == nil { + // Return nil is for test purpose where domain is not well initialized in session context. + // In real implementation, the code will not reach here. + return nil + } return do.infoCache.GetLatest() } @@ -970,14 +975,15 @@ func (do *Domain) GetEtcdClient() *clientv3.Client { // LoadPrivilegeLoop create a goroutine loads privilege tables in a loop, it // should be called only once in BootstrapSession. -func (do *Domain) LoadPrivilegeLoop(ctx sessionctx.Context) error { - ctx.GetSessionVars().InRestrictedSQL = true - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "set @@autocommit = 1") +func (do *Domain) LoadPrivilegeLoop(sctx sessionctx.Context) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + sctx.GetSessionVars().InRestrictedSQL = true + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "set @@autocommit = 1") if err != nil { return err } do.privHandle = privileges.NewHandle() - err = do.privHandle.Update(ctx) + err = do.privHandle.Update(sctx) if err != nil { return err } @@ -1016,7 +1022,7 @@ func (do *Domain) LoadPrivilegeLoop(ctx sessionctx.Context) error { } count = 0 - err := do.privHandle.Update(ctx) + err := do.privHandle.Update(sctx) metrics.LoadPrivilegeCounter.WithLabelValues(metrics.RetLabel(err)).Inc() if err != nil { logutil.BgLogger().Error("load privilege failed", zap.Error(err)) diff --git a/domain/sysvar_cache.go b/domain/sysvar_cache.go index 931e28cec5e15..e235e95b9dec5 100644 --- a/domain/sysvar_cache.go +++ b/domain/sysvar_cache.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/logutil" @@ -87,11 +88,12 @@ func (do *Domain) GetGlobalVar(name string) (string, error) { return "", variable.ErrUnknownSystemVar.GenWithStackByArgs(name) } -func (do *Domain) fetchTableValues(ctx sessionctx.Context) (map[string]string, error) { +func (do *Domain) fetchTableValues(sctx sessionctx.Context) (map[string]string, error) { tableContents := make(map[string]string) // Copy all variables from the table to tableContents - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT variable_name, variable_value FROM mysql.global_variables`) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT variable_name, variable_value FROM mysql.global_variables`) if err != nil { return nil, err } diff --git a/dumpling/cli/versions.go b/dumpling/cli/versions.go index 2d29dd1296bd6..ad55c6ae9539f 100644 --- a/dumpling/cli/versions.go +++ b/dumpling/cli/versions.go @@ -16,9 +16,8 @@ package cli import ( "fmt" - "go.uber.org/zap" - "github.com/pingcap/tidb/dumpling/log" + "go.uber.org/zap" ) var ( diff --git a/dumpling/cmd/dumpling/main.go 
b/dumpling/cmd/dumpling/main.go index 3d72df1e694ea..5e4b8d8f0521b 100644 --- a/dumpling/cmd/dumpling/main.go +++ b/dumpling/cmd/dumpling/main.go @@ -18,13 +18,12 @@ import ( "fmt" "os" + "github.com/pingcap/tidb/dumpling/cli" + "github.com/pingcap/tidb/dumpling/export" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/spf13/pflag" "go.uber.org/zap" - - "github.com/pingcap/tidb/dumpling/cli" - "github.com/pingcap/tidb/dumpling/export" ) func main() { diff --git a/dumpling/export/BUILD.bazel b/dumpling/export/BUILD.bazel index fca79ffd31a76..b12dc5f87246b 100644 --- a/dumpling/export/BUILD.bazel +++ b/dumpling/export/BUILD.bazel @@ -59,6 +59,7 @@ go_library( "@com_github_spf13_pflag//:pflag", "@com_github_tikv_pd_client//:client", "@io_etcd_go_etcd_client_v3//:client", + "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", diff --git a/dumpling/export/block_allow_list.go b/dumpling/export/block_allow_list.go index 2ce925abe02da..a3d4852b50c65 100644 --- a/dumpling/export/block_allow_list.go +++ b/dumpling/export/block_allow_list.go @@ -3,9 +3,8 @@ package export import ( - "go.uber.org/zap" - tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) func filterDatabases(tctx *tcontext.Context, conf *Config, databases []string) []string { diff --git a/dumpling/export/block_allow_list_test.go b/dumpling/export/block_allow_list_test.go index 640e9cef7e4dc..28faa4e95f261 100644 --- a/dumpling/export/block_allow_list_test.go +++ b/dumpling/export/block_allow_list_test.go @@ -6,12 +6,11 @@ import ( "strings" "testing" + "github.com/pingcap/tidb/br/pkg/version" + tcontext "github.com/pingcap/tidb/dumpling/context" "github.com/pingcap/tidb/util/filter" tf "github.com/pingcap/tidb/util/table-filter" "github.com/stretchr/testify/require" - - "github.com/pingcap/tidb/br/pkg/version" - tcontext "github.com/pingcap/tidb/dumpling/context" ) func TestFilterTables(t *testing.T) { diff --git a/dumpling/export/config.go b/dumpling/export/config.go index b74799c39d8d2..d9fa184643415 100644 --- a/dumpling/export/config.go +++ b/dumpling/export/config.go @@ -17,15 +17,14 @@ import ( "github.com/docker/go-units" "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/spf13/pflag" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/promutil" filter "github.com/pingcap/tidb/util/table-filter" + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/pflag" + "go.uber.org/zap" ) const ( diff --git a/dumpling/export/conn.go b/dumpling/export/conn.go index c6723865b7003..c981febe19450 100644 --- a/dumpling/export/conn.go +++ b/dumpling/export/conn.go @@ -6,10 +6,9 @@ import ( "database/sql" "github.com/pingcap/errors" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/utils" tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) // BaseConn wraps connection instance. 
diff --git a/dumpling/export/consistency.go b/dumpling/export/consistency.go index 2af2abaa7d19c..6e0f65960f275 100644 --- a/dumpling/export/consistency.go +++ b/dumpling/export/consistency.go @@ -7,7 +7,6 @@ import ( "database/sql" "github.com/pingcap/errors" - "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/br/pkg/version" tcontext "github.com/pingcap/tidb/dumpling/context" diff --git a/dumpling/export/consistency_test.go b/dumpling/export/consistency_test.go index cd81d27db11f5..97f94a16a0334 100644 --- a/dumpling/export/consistency_test.go +++ b/dumpling/export/consistency_test.go @@ -10,11 +10,10 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/go-sql-driver/mysql" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/br/pkg/version" dbconfig "github.com/pingcap/tidb/config" tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/stretchr/testify/require" ) func TestConsistencyController(t *testing.T) { diff --git a/dumpling/export/dump.go b/dumpling/export/dump.go index 43d801430e6e8..c6fa3c08615f8 100755 --- a/dumpling/export/dump.go +++ b/dumpling/export/dump.go @@ -9,7 +9,6 @@ import ( "encoding/hex" "fmt" "math/big" - "sort" "strconv" "strings" "sync/atomic" @@ -20,10 +19,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" pclog "github.com/pingcap/log" - pd "github.com/tikv/pd/client" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/version" @@ -36,6 +31,10 @@ import ( "github.com/pingcap/tidb/store/helper" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" ) var openDBFunc = sql.Open @@ -1558,9 +1557,7 @@ func (d *Dumper) renewSelectTableRegionFuncForLowerTiDB(tctx *tcontext.Context) for _, tbInfoLoop := range tbInfos { // make sure tbInfo is only used in this loop tbInfo := tbInfoLoop - sort.Slice(tbInfo, func(i, j int) bool { - return tbInfo[i] < tbInfo[j] - }) + slices.Sort(tbInfo) } } diff --git a/dumpling/export/dump_test.go b/dumpling/export/dump_test.go index b059a1ae28ac2..51e522bf4824b 100644 --- a/dumpling/export/dump_test.go +++ b/dumpling/export/dump_test.go @@ -9,15 +9,13 @@ import ( "time" "github.com/DATA-DOG/go-sqlmock" - "github.com/pingcap/tidb/util/promutil" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - "github.com/pingcap/errors" - "github.com/pingcap/tidb/br/pkg/version" tcontext "github.com/pingcap/tidb/dumpling/context" "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/util/promutil" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestDumpBlock(t *testing.T) { diff --git a/dumpling/export/http_handler.go b/dumpling/export/http_handler.go index 91332736f17f2..ca861dd11e8bc 100644 --- a/dumpling/export/http_handler.go +++ b/dumpling/export/http_handler.go @@ -10,11 +10,10 @@ import ( "time" "github.com/pingcap/errors" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/soheilhy/cmux" - tcontext "github.com/pingcap/tidb/dumpling/context" "github.com/pingcap/tidb/dumpling/log" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/soheilhy/cmux" ) var cmuxReadTimeout = 10 * time.Second diff --git a/dumpling/export/ir.go b/dumpling/export/ir.go index b74d7dc1dfef3..4b98019605e9c 100644 --- a/dumpling/export/ir.go +++ 
b/dumpling/export/ir.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/pingcap/errors" - tcontext "github.com/pingcap/tidb/dumpling/context" ) diff --git a/dumpling/export/ir_impl.go b/dumpling/export/ir_impl.go index 57a14f5a66d11..d51462b59ab01 100644 --- a/dumpling/export/ir_impl.go +++ b/dumpling/export/ir_impl.go @@ -7,9 +7,8 @@ import ( "strings" "github.com/pingcap/errors" - "go.uber.org/zap" - tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) // rowIter implements the SQLRowIter interface. diff --git a/dumpling/export/metadata.go b/dumpling/export/metadata.go index d4ffa3c7aa3d2..7d2cf53128688 100644 --- a/dumpling/export/metadata.go +++ b/dumpling/export/metadata.go @@ -11,11 +11,10 @@ import ( "time" "github.com/pingcap/errors" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/version" tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) type globalMetadata struct { diff --git a/dumpling/export/metadata_test.go b/dumpling/export/metadata_test.go index dc67c73628b87..90099bbf00acd 100644 --- a/dumpling/export/metadata_test.go +++ b/dumpling/export/metadata_test.go @@ -10,11 +10,10 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/version" tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/stretchr/testify/require" ) const ( diff --git a/dumpling/export/prepare.go b/dumpling/export/prepare.go index 1d9d05304c7cf..0878ce31f8666 100644 --- a/dumpling/export/prepare.go +++ b/dumpling/export/prepare.go @@ -10,7 +10,6 @@ import ( "text/template" "github.com/pingcap/errors" - tcontext "github.com/pingcap/tidb/dumpling/context" ) diff --git a/dumpling/export/prepare_test.go b/dumpling/export/prepare_test.go index ad326a412c4a3..f9f559448d078 100644 --- a/dumpling/export/prepare_test.go +++ b/dumpling/export/prepare_test.go @@ -9,9 +9,8 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" - "github.com/stretchr/testify/require" - tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/stretchr/testify/require" ) func TestPrepareDumpingDatabases(t *testing.T) { diff --git a/dumpling/export/retry.go b/dumpling/export/retry.go index f2bd998df8ccb..ad49f46b76c65 100644 --- a/dumpling/export/retry.go +++ b/dumpling/export/retry.go @@ -8,11 +8,10 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" - "github.com/pingcap/tidb/util/dbutil" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/utils" tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/pingcap/tidb/util/dbutil" + "go.uber.org/zap" ) const ( diff --git a/dumpling/export/sql.go b/dumpling/export/sql.go index 9f866209241b5..ad78158bb27a8 100644 --- a/dumpling/export/sql.go +++ b/dumpling/export/sql.go @@ -17,9 +17,6 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "go.uber.org/multierr" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/version" dbconfig "github.com/pingcap/tidb/config" tcontext "github.com/pingcap/tidb/dumpling/context" @@ -27,6 +24,8 @@ import ( "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/store/helper" + "go.uber.org/multierr" + "go.uber.org/zap" ) const ( diff --git a/dumpling/export/sql_test.go b/dumpling/export/sql_test.go index a752df681c146..d79f74993c10b 100644 --- a/dumpling/export/sql_test.go +++ b/dumpling/export/sql_test.go @@ 
-15,16 +15,14 @@ import ( "strings" "testing" - "github.com/go-sql-driver/mysql" - "github.com/pingcap/tidb/util/promutil" - "github.com/DATA-DOG/go-sqlmock" + "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/br/pkg/version" dbconfig "github.com/pingcap/tidb/config" tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/pingcap/tidb/util/promutil" + "github.com/stretchr/testify/require" ) var showIndexHeaders = []string{ diff --git a/dumpling/export/status.go b/dumpling/export/status.go index 4b7e706898724..e7d708bb49f2f 100644 --- a/dumpling/export/status.go +++ b/dumpling/export/status.go @@ -8,9 +8,8 @@ import ( "time" "github.com/docker/go-units" - "go.uber.org/zap" - tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) const logProgressTick = 2 * time.Minute diff --git a/dumpling/export/util.go b/dumpling/export/util.go index 1506a424946f3..cad703f9f23f4 100644 --- a/dumpling/export/util.go +++ b/dumpling/export/util.go @@ -5,15 +5,14 @@ package export import ( "context" "database/sql" - "sort" "strings" "time" "github.com/pingcap/errors" - clientv3 "go.etcd.io/etcd/client/v3" - "github.com/pingcap/tidb/br/pkg/version" tcontext "github.com/pingcap/tidb/dumpling/context" + clientv3 "go.etcd.io/etcd/client/v3" + "golang.org/x/exp/slices" ) const tidbServerInformationPath = "/tidb/server/info" @@ -50,8 +49,8 @@ func checkSameCluster(tctx *tcontext.Context, db *sql.DB, pdAddrs []string) (boo if err != nil { return false, err } - sort.Strings(tidbDDLIDs) - sort.Strings(pdDDLIDs) + slices.Sort(tidbDDLIDs) + slices.Sort(pdDDLIDs) return sameStringArray(tidbDDLIDs, pdDDLIDs), nil } diff --git a/dumpling/export/util_for_test.go b/dumpling/export/util_for_test.go index 67d7f62f9fcfb..739a431b2230e 100644 --- a/dumpling/export/util_for_test.go +++ b/dumpling/export/util_for_test.go @@ -9,7 +9,6 @@ import ( "fmt" "github.com/DATA-DOG/go-sqlmock" - tcontext "github.com/pingcap/tidb/dumpling/context" ) diff --git a/dumpling/export/util_test.go b/dumpling/export/util_test.go index 5932dcc9888f4..1686a24902825 100644 --- a/dumpling/export/util_test.go +++ b/dumpling/export/util_test.go @@ -6,9 +6,8 @@ import ( "fmt" "testing" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/br/pkg/version" + "github.com/stretchr/testify/require" ) func TestRepeatableRead(t *testing.T) { diff --git a/dumpling/export/writer.go b/dumpling/export/writer.go index 06ba5a1605b29..d3545aa2e4f18 100644 --- a/dumpling/export/writer.go +++ b/dumpling/export/writer.go @@ -10,11 +10,10 @@ import ( "text/template" "github.com/pingcap/errors" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils" tcontext "github.com/pingcap/tidb/dumpling/context" + "go.uber.org/zap" ) // Writer is the abstraction that keep pulling data from database and write to files. 
diff --git a/dumpling/export/writer_serial_test.go b/dumpling/export/writer_serial_test.go index d08015cb9d0c0..2290ca86cdfa2 100644 --- a/dumpling/export/writer_serial_test.go +++ b/dumpling/export/writer_serial_test.go @@ -9,11 +9,10 @@ import ( "testing" "github.com/pingcap/errors" - "github.com/pingcap/tidb/util/promutil" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/br/pkg/storage" tcontext "github.com/pingcap/tidb/dumpling/context" + "github.com/pingcap/tidb/util/promutil" + "github.com/stretchr/testify/require" ) func TestWriteMeta(t *testing.T) { diff --git a/dumpling/export/writer_test.go b/dumpling/export/writer_test.go index 93fde0ebdd5e4..4192a86179163 100644 --- a/dumpling/export/writer_test.go +++ b/dumpling/export/writer_test.go @@ -12,10 +12,9 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" + tcontext "github.com/pingcap/tidb/dumpling/context" "github.com/pingcap/tidb/util/promutil" "github.com/stretchr/testify/require" - - tcontext "github.com/pingcap/tidb/dumpling/context" ) func TestWriteDatabaseMeta(t *testing.T) { diff --git a/dumpling/export/writer_util.go b/dumpling/export/writer_util.go index d3aa417d550b8..c43a1d140cab4 100755 --- a/dumpling/export/writer_util.go +++ b/dumpling/export/writer_util.go @@ -13,13 +13,12 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/summary" tcontext "github.com/pingcap/tidb/dumpling/context" "github.com/pingcap/tidb/dumpling/log" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" ) const lengthLimit = 1048576 diff --git a/errno/errcode.go b/errno/errcode.go index a26a1a1eaea6e..4054229f8dce5 100644 --- a/errno/errcode.go +++ b/errno/errcode.go @@ -1025,6 +1025,7 @@ const ( ErrNonTransactionalJobFailure = 8143 ErrSettingNoopVariable = 8144 ErrGettingNoopVariable = 8145 + ErrCannotMigrateSession = 8146 // Error codes used by TiDB ddl package ErrUnsupportedDDLOperation = 8200 diff --git a/errno/errname.go b/errno/errname.go index 58866b7564cd0..a8be48e6eed06 100644 --- a/errno/errname.go +++ b/errno/errname.go @@ -1020,6 +1020,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrNonTransactionalJobFailure: mysql.Message("non-transactional job failed, job id: %d, total jobs: %d. job range: [%s, %s], job sql: %s, err: %v", []int{2, 3, 4}), ErrSettingNoopVariable: mysql.Message("setting %s has no effect in TiDB", nil), ErrGettingNoopVariable: mysql.Message("variable %s has no effect in TiDB", nil), + ErrCannotMigrateSession: mysql.Message("cannot migrate the current session: %s", nil), ErrWarnOptimizerHintInvalidInteger: mysql.Message("integer value is out of range in '%s'", nil), ErrWarnOptimizerHintUnsupportedHint: mysql.Message("Optimizer hint %s is not supported by TiDB and is ignored", nil), diff --git a/errors.toml b/errors.toml index 11518f664cda6..c1c62e3d89c88 100755 --- a/errors.toml +++ b/errors.toml @@ -2261,6 +2261,11 @@ error = ''' non-transactional job failed, job id: %d, total jobs: %d. 
job range: [%s, %s], job sql: %s, err: %v ''' +["session:8146"] +error = ''' +cannot migrate the current session: %s +''' + ["structure:8217"] error = ''' invalid encoded hash key flag diff --git a/executor/BUILD.bazel b/executor/BUILD.bazel index e9b7c23298645..6b5590f9429a0 100644 --- a/executor/BUILD.bazel +++ b/executor/BUILD.bazel @@ -132,7 +132,6 @@ go_library( "//sessionctx/stmtctx", "//sessionctx/variable", "//sessiontxn", - "//sessiontxn/legacy", "//sessiontxn/staleread", "//statistics", "//statistics/handle", @@ -227,6 +226,7 @@ go_library( "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//status", + "@org_golang_x_exp//slices", "@org_golang_x_sync//errgroup", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", @@ -424,6 +424,7 @@ go_test( "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", "@org_golang_google_grpc//:grpc", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_goleak//:goleak", "@org_uber_go_zap//:zap", diff --git a/executor/adapter.go b/executor/adapter.go index 31a8c8d20150f..b1a759cc1be27 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -189,16 +189,15 @@ func (a *recordSet) OnFetchReturned() { // TelemetryInfo records some telemetry information during execution. type TelemetryInfo struct { - UseNonRecursive bool - UseRecursive bool + UseNonRecursive bool + UseRecursive bool + UseMultiSchemaChange bool } // ExecStmt implements the sqlexec.Statement interface, it builds a planner.Plan to an sqlexec.Statement. type ExecStmt struct { // GoCtx stores parent go context.Context for a stmt. GoCtx context.Context - // ReplicaReadScope indicates the scope the store selector scope the request visited - ReplicaReadScope string // InfoSchema stores a reference to the schema information. InfoSchema infoschema.InfoSchema // Plan stores a reference to the final physical plan. 
@@ -223,8 +222,13 @@ type ExecStmt struct { Ti *TelemetryInfo } +// GetStmtNode returns the stmtNode inside Statement +func (a ExecStmt) GetStmtNode() ast.StmtNode { + return a.StmtNode +} + // PointGet short path for point exec directly from plan, keep only necessary steps -func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*recordSet, error) { +func (a *ExecStmt) PointGet(ctx context.Context) (*recordSet, error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("ExecStmt.PointGet", opentracing.ChildOf(span.Context())) span1.LogKV("sql", a.OriginText()) @@ -235,7 +239,7 @@ func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*rec sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerInShortPointGetPlan", true) // stale read should not reach here staleread.AssertStmtStaleness(a.Ctx, false) - sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, is) + sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, a.InfoSchema) }) ctx = a.observeStmtBeginForTopSQL(ctx) @@ -254,12 +258,12 @@ func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*rec } else { // CachedPlan type is already checked in last step pointGetPlan := a.PsStmt.PreparedAst.CachedPlan.(*plannercore.PointGetPlan) - exec.Init(pointGetPlan, startTs) + exec.Init(pointGetPlan) a.PsStmt.Executor = exec } } if a.PsStmt.Executor == nil { - b := newExecutorBuilder(a.Ctx, is, a.Ti, a.ReplicaReadScope) + b := newExecutorBuilder(a.Ctx, a.InfoSchema, a.Ti) newExecutor := b.build(a.Plan) if b.err != nil { return nil, b.err @@ -312,11 +316,14 @@ func (a *ExecStmt) RebuildPlan(ctx context.Context) (int64, error) { sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerInRebuildPlan", true) sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, ret.InfoSchema) staleread.AssertStmtStaleness(a.Ctx, ret.IsStaleness) + if ret.IsStaleness { + sessiontxn.AssertTxnManagerReadTS(a.Ctx, ret.LastSnapshotTS) + } }) a.InfoSchema = sessiontxn.GetTxnManager(a.Ctx).GetTxnInfoSchema() - a.ReplicaReadScope = ret.ReadReplicaScope - if a.Ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && a.ReplicaReadScope == kv.GlobalReplicaScope { + replicaReadScope := sessiontxn.GetTxnManager(a.Ctx).GetReadReplicaScope() + if a.Ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && replicaReadScope == kv.GlobalReplicaScope { logutil.BgLogger().Warn(fmt.Sprintf("tidb can't read closest replicas due to it haven't %s label", placement.DCLabelKey)) } p, names, err := planner.Optimize(ctx, a.Ctx, a.StmtNode, a.InfoSchema) @@ -819,7 +826,7 @@ func (a *ExecStmt) buildExecutor() (Executor, error) { ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow } - b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti, a.ReplicaReadScope) + b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti) e := b.build(a.Plan) if b.err != nil { return nil, errors.Trace(b.err) diff --git a/executor/admin.go b/executor/admin.go index 974f9d1b177d9..ba219b70b6db3 100644 --- a/executor/admin.go +++ b/executor/admin.go @@ -317,7 +317,8 @@ func (e *RecoverIndexExec) backfillIndex(ctx context.Context) (int64, int64, err result backfillResult ) for { - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) var err error result, 
err = e.backfillIndexInTxn(ctx, txn, currentHandle) @@ -694,7 +695,8 @@ func (e *CleanupIndexExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *CleanupIndexExec) cleanTableIndex(ctx context.Context) error { for { - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { txn.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) err := e.fetchIndex(ctx, txn) diff --git a/executor/aggfuncs/func_group_concat.go b/executor/aggfuncs/func_group_concat.go index e838c78f1ff62..7a178cc4c6426 100644 --- a/executor/aggfuncs/func_group_concat.go +++ b/executor/aggfuncs/func_group_concat.go @@ -353,16 +353,17 @@ func (h *topNRows) tryToAdd(row sortRow) (truncated bool, memDelta int64) { for h.currSize > h.limitSize { debt := h.currSize - h.limitSize - if uint64(h.rows[0].buffer.Len()) > debt { + heapPopRow := heap.Pop(h).(sortRow) + if uint64(heapPopRow.buffer.Len()) > debt { h.currSize -= debt - h.rows[0].buffer.Truncate(h.rows[0].buffer.Len() - int(debt)) + heapPopRow.buffer.Truncate(heapPopRow.buffer.Len() - int(debt)) + heap.Push(h, heapPopRow) } else { - h.currSize -= uint64(h.rows[0].buffer.Len()) + h.sepSize - memDelta -= int64(h.rows[0].buffer.Cap()) - for _, dt := range h.rows[0].byItems { + h.currSize -= uint64(heapPopRow.buffer.Len()) + h.sepSize + memDelta -= int64(heapPopRow.buffer.Cap()) + for _, dt := range heapPopRow.byItems { memDelta -= GetDatumMemSize(dt) } - heap.Pop(h) h.isSepTruncated = true } } diff --git a/executor/aggregate.go b/executor/aggregate.go index 68dae4d4fe10d..52705422a0d16 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -18,7 +18,6 @@ import ( "bytes" "context" "fmt" - "sort" "sync" "sync/atomic" "time" @@ -45,6 +44,7 @@ import ( "github.com/pingcap/tidb/util/set" "github.com/twmb/murmur3" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type aggPartialResultMapper map[string][]aggfuncs.PartialResult @@ -1146,7 +1146,7 @@ func (e *HashAggRuntimeStats) workerString(buf *bytes.Buffer, prefix string, con time.Duration(wallTime), concurrency, totalTaskNum, time.Duration(totalWait), time.Duration(totalExec), time.Duration(totalTime))) n := len(workerStats) if n > 0 { - sort.Slice(workerStats, func(i, j int) bool { return workerStats[i].WorkerTime < workerStats[j].WorkerTime }) + slices.SortFunc(workerStats, func(i, j *AggWorkerStat) bool { return i.WorkerTime < j.WorkerTime }) buf.WriteString(fmt.Sprintf(", max:%v, p95:%v", time.Duration(workerStats[n-1].WorkerTime), time.Duration(workerStats[n*19/20].WorkerTime))) } diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index f36e066505ad1..e804f3069f445 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -1629,3 +1629,17 @@ PARTITION p20220624 VALUES LESS THAN ("20220625") tk.MustQuery("SELECT /*+STREAM_AGG()*/ col1,sum(money) FROM t100 WHERE logtime>='2022-06-09 00:00:00' AND col1=100 ;").Check(testkit.Rows("100 20")) tk.MustQuery("SELECT /*+HASH_AGG()*/ col1,sum(money) FROM t100 WHERE logtime>='2022-06-09 00:00:00' AND col1=100 ;").Check(testkit.Rows("100 20")) } + +// https://github.com/pingcap/tidb/issues/27751 +func TestIssue27751(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + 
tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table test.t(nname char(20));") + tk.MustExec("insert into test.t values ('2'),(null),('11'),('2'),(null),('2'),(null),('11'),('33');") + tk.MustExec("set @@group_concat_max_len=0;") + tk.MustQuery("select group_concat(nname order by 1 separator '#' ) from t;").Check(testkit.Rows("11#1")) + tk.MustQuery("select group_concat(nname order by 1 desc separator '#' ) from t;").Check(testkit.Rows("33#2")) +} diff --git a/executor/analyze.go b/executor/analyze.go index c9559ccff3cd4..927c5c834e756 100644 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/planner/core" @@ -175,8 +176,9 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error { } idx += 1 } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, sql.String()) + _, _, err := exec.ExecRestrictedSQL(ctx, nil, sql.String()) if err != nil { return err } @@ -342,22 +344,23 @@ func AddNewAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob) { } // StartAnalyzeJob marks the state of the analyze job as running and sets the start time. -func StartAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob) { +func StartAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob) { if job == nil || job.ID == nil { return } job.StartTime = time.Now() job.Progress.SetLastDumpTime(job.StartTime) - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "UPDATE mysql.analyze_jobs SET start_time = CONVERT_TZ(%?, '+00:00', @@TIME_ZONE), state = %? WHERE id = %?" - _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, job.StartTime.UTC().Format(types.TimeFormat), statistics.AnalyzeRunning, *job.ID) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, job.StartTime.UTC().Format(types.TimeFormat), statistics.AnalyzeRunning, *job.ID) if err != nil { logutil.BgLogger().Warn("failed to update analyze job", zap.String("update", fmt.Sprintf("%s->%s", statistics.AnalyzePending, statistics.AnalyzeRunning)), zap.Error(err)) } } // UpdateAnalyzeJob updates count of the processed rows when increment reaches a threshold. -func UpdateAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, rowCount int64) { +func UpdateAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob, rowCount int64) { if job == nil || job.ID == nil { return } @@ -365,16 +368,17 @@ func UpdateAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, rowCou if delta == 0 { return } - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "UPDATE mysql.analyze_jobs SET processed_rows = processed_rows + %? WHERE id = %?" 
- _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, delta, *job.ID) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, delta, *job.ID) if err != nil { logutil.BgLogger().Warn("failed to update analyze job", zap.String("update", fmt.Sprintf("process %v rows", delta)), zap.Error(err)) } } // FinishAnalyzeJob updates the state of the analyze job to finished/failed according to `meetError` and sets the end time. -func FinishAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, analyzeErr error) { +func FinishAnalyzeJob(sctx sessionctx.Context, job *statistics.AnalyzeJob, analyzeErr error) { if job == nil || job.ID == nil { return } @@ -395,8 +399,9 @@ func FinishAnalyzeJob(ctx sessionctx.Context, job *statistics.AnalyzeJob, analyz sql = "UPDATE mysql.analyze_jobs SET processed_rows = processed_rows + %?, end_time = CONVERT_TZ(%?, '+00:00', @@TIME_ZONE), state = %?, process_id = NULL WHERE id = %?" args = []interface{}{job.Progress.GetDeltaCount(), job.EndTime.UTC().Format(types.TimeFormat), statistics.AnalyzeFinished, *job.ID} } - exec := ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, args...) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool}, sql, args...) if err != nil { var state string if analyzeErr != nil { diff --git a/executor/analyze_fast.go b/executor/analyze_fast.go index 27e514150e314..8af6486ba2ca1 100644 --- a/executor/analyze_fast.go +++ b/executor/analyze_fast.go @@ -114,7 +114,8 @@ type AnalyzeFastExec struct { func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select flag from mysql.stats_histograms where table_id = %?", e.tableID.GetStatisticsID()) if err != nil { return } @@ -150,7 +151,7 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } } var rs sqlexec.RecordSet - rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql.String()) + rs, err = e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return } @@ -173,15 +174,16 @@ func (e *AnalyzeFastExec) calculateEstimateSampleStep() (err error) { } func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) txn, err := e.ctx.Txn(true) if err != nil { if kv.ErrInvalidTxn.Equal(err) { - _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return nil, errors.Trace(err) } rollbackFn = func() error { - _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback") + _, err := e.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback") return err } } else { diff --git a/executor/analyzetest/analyze_test.go b/executor/analyzetest/analyze_test.go index 
b37376132ec5b..5a12296d0643a 100644 --- a/executor/analyzetest/analyze_test.go +++ b/executor/analyzetest/analyze_test.go @@ -15,6 +15,7 @@ package analyzetest import ( + "context" "encoding/json" "fmt" "strconv" @@ -169,9 +170,10 @@ func TestAnalyzeRestrict(t *testing.T) { tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int)") - ctx := tk.Session().(sessionctx.Context) - ctx.GetSessionVars().InRestrictedSQL = true - tk.MustExec("analyze table t") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rs, err := tk.Session().ExecuteInternal(ctx, "analyze table t") + require.Nil(t, err) + require.Nil(t, rs) } func TestAnalyzeParameters(t *testing.T) { diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index b5eb68a8b12de..1af256ade8c31 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -22,8 +22,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -40,35 +38,32 @@ import ( "github.com/pingcap/tidb/util/logutil/consistency" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/rowcodec" - "github.com/tikv/client-go/v2/txnkv/txnsnapshot" + "golang.org/x/exp/slices" ) // BatchPointGetExec executes a bunch of point select queries. type BatchPointGetExec struct { baseExecutor - tblInfo *model.TableInfo - idxInfo *model.IndexInfo - handles []kv.Handle - physIDs []int64 - partExpr *tables.PartitionExpr - partPos int - singlePart bool - partTblID int64 - idxVals [][]types.Datum - readReplicaScope string - isStaleness bool - snapshotTS uint64 - txn kv.Transaction - lock bool - waitTime int64 - inited uint32 - values [][]byte - index int - rowDecoder *rowcodec.ChunkDecoder - keepOrder bool - desc bool - batchGetter kv.BatchGetter + tblInfo *model.TableInfo + idxInfo *model.IndexInfo + handles []kv.Handle + physIDs []int64 + partExpr *tables.PartitionExpr + partPos int + singlePart bool + partTblID int64 + idxVals [][]types.Datum + txn kv.Transaction + lock bool + waitTime int64 + inited uint32 + values [][]byte + index int + rowDecoder *rowcodec.ChunkDecoder + keepOrder bool + desc bool + batchGetter kv.BatchGetter columns []*model.ColumnInfo // virtualColumnIndex records all the indices of virtual columns and sort them in definition @@ -78,9 +73,8 @@ type BatchPointGetExec struct { // virtualColumnRetFieldTypes records the RetFieldTypes of virtual columns. virtualColumnRetFieldTypes []*types.FieldType - snapshot kv.Snapshot - stats *runtimeStatsWithSnapshot - cacheTable kv.MemBuffer + snapshot kv.Snapshot + stats *runtimeStatsWithSnapshot } // buildVirtualColumnInfo saves virtual column indices and sort them in definition order @@ -98,69 +92,24 @@ func (e *BatchPointGetExec) buildVirtualColumnInfo() { func (e *BatchPointGetExec) Open(context.Context) error { sessVars := e.ctx.GetSessionVars() txnCtx := sessVars.TxnCtx - stmtCtx := sessVars.StmtCtx txn, err := e.ctx.Txn(false) if err != nil { return err } e.txn = txn - var snapshot kv.Snapshot - if txn.Valid() && txnCtx.StartTS == txnCtx.GetForUpdateTS() && txnCtx.StartTS == e.snapshotTS { - // We can safely reuse the transaction snapshot if snapshotTS is equal to forUpdateTS. - // The snapshot may contain cache that can reduce RPC call. 
- snapshot = txn.GetSnapshot() - } else { - snapshot = e.ctx.GetSnapshotWithTS(e.snapshotTS) - } - if e.ctx.GetSessionVars().StmtCtx.RCCheckTS { - snapshot.SetOption(kv.IsolationLevel, kv.RCCheckTS) - } - if e.cacheTable != nil { - snapshot = cacheTableSnapshot{snapshot, e.cacheTable} - } - if e.runtimeStats != nil { - snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} - e.stats = &runtimeStatsWithSnapshot{ - SnapshotRuntimeStats: snapshotStats, - } - snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) - stmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) - } - replicaReadType := e.ctx.GetSessionVars().GetReplicaRead() - if replicaReadType.IsFollowerRead() && !e.ctx.GetSessionVars().StmtCtx.RCCheckTS { - snapshot.SetOption(kv.ReplicaRead, replicaReadType) - } - snapshot.SetOption(kv.TaskID, stmtCtx.TaskID) - snapshot.SetOption(kv.ReadReplicaScope, e.readReplicaScope) - snapshot.SetOption(kv.IsStalenessReadOnly, e.isStaleness) - failpoint.Inject("assertBatchPointReplicaOption", func(val failpoint.Value) { - assertScope := val.(string) - if replicaReadType.IsClosestRead() && assertScope != e.readReplicaScope { - panic("batch point get replica option fail") - } - }) - - if replicaReadType.IsClosestRead() && e.readReplicaScope != kv.GlobalTxnScope { - snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ - { - Key: placement.DCLabelKey, - Value: e.readReplicaScope, - }, - }) - } - setOptionForTopSQL(stmtCtx, snapshot) - var batchGetter kv.BatchGetter = snapshot + + setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, e.snapshot) + var batchGetter kv.BatchGetter = e.snapshot if txn.Valid() { lock := e.tblInfo.Lock if e.lock { - batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), &PessimisticLockCacheGetter{txnCtx: txnCtx}, snapshot) + batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), &PessimisticLockCacheGetter{txnCtx: txnCtx}, e.snapshot) } else if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) && e.ctx.GetSessionVars().EnablePointGetCache { - batchGetter = newCacheBatchGetter(e.ctx, e.tblInfo.ID, snapshot) + batchGetter = newCacheBatchGetter(e.ctx, e.tblInfo.ID, e.snapshot) } else { - batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), nil, snapshot) + batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), nil, e.snapshot) } } - e.snapshot = snapshot e.batchGetter = batchGetter return nil } @@ -297,11 +246,11 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { toFetchIndexKeys = append(toFetchIndexKeys, idxKey) } if e.keepOrder { - sort.Slice(toFetchIndexKeys, func(i int, j int) bool { + slices.SortFunc(toFetchIndexKeys, func(i, j kv.Key) bool { if e.desc { - return toFetchIndexKeys[i].Cmp(toFetchIndexKeys[j]) > 0 + return i.Cmp(j) > 0 } - return toFetchIndexKeys[i].Cmp(toFetchIndexKeys[j]) < 0 + return i.Cmp(j) < 0 }) } @@ -365,12 +314,11 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { failpoint.InjectContext(ctx, "batchPointGetRepeatableReadTest-step2", nil) }) } else if e.keepOrder { - less := func(i int, j int) bool { + less := func(i, j kv.Handle) bool { if e.desc { - return e.handles[i].Compare(e.handles[j]) > 0 + return i.Compare(j) > 0 } - return e.handles[i].Compare(e.handles[j]) < 0 - + return i.Compare(j) < 0 } if e.tblInfo.PKIsHandle && mysql.HasUnsignedFlag(e.tblInfo.GetPkColInfo().GetFlag()) { uintComparator := func(i, h kv.Handle) int { @@ -387,14 +335,14 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { } return 0 } - less 
= func(i int, j int) bool { + less = func(i, j kv.Handle) bool { if e.desc { - return uintComparator(e.handles[i], e.handles[j]) > 0 + return uintComparator(i, j) > 0 } - return uintComparator(e.handles[i], e.handles[j]) < 0 + return uintComparator(i, j) < 0 } } - sort.Slice(e.handles, less) + slices.SortFunc(e.handles, less) } keys := make([]kv.Key, 0, len(e.handles)) diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index d843da3b3d415..06282390677e4 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" - plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" @@ -45,7 +44,6 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/stringutil" - "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap/zapcore" ) @@ -292,7 +290,7 @@ func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expressi plan.SetSchema(schema) plan.Init(ctx, nil, 0) plan.SetChildren(nil) - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) exec := b.build(plan) hashAgg := exec.(*HashAggExec) hashAgg.children[0] = src @@ -344,7 +342,7 @@ func buildStreamAggExecutor(ctx sessionctx.Context, srcExec Executor, schema *ex plan = sg } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) return b.build(plan) } @@ -567,8 +565,8 @@ func buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, f plan = core.PhysicalShuffle{ Concurrency: concurrency, - Tails: []plannercore.PhysicalPlan{tail}, - DataSources: []plannercore.PhysicalPlan{src}, + Tails: []core.PhysicalPlan{tail}, + DataSources: []core.PhysicalPlan{src}, SplitterType: core.PartitionHashSplitterType, ByItemArrays: [][]expression.Expression{byItems}, }.Init(ctx, nil, 0) @@ -577,7 +575,7 @@ func buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, f plan = win } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) exec := b.build(plan) return exec } @@ -1317,7 +1315,7 @@ func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, keyOff2IdxOff[i] = i } - readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil, oracle.GlobalTxnScope). + readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil). newDataReaderBuilder(&mockPhysicalIndexReader{e: innerDS}) if err != nil { return nil, err @@ -1391,7 +1389,7 @@ func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, inne outerCompareFuncs = append(outerCompareFuncs, expression.GetCmpFunction(nil, outerJoinKeys[i], outerJoinKeys[i])) } - readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil, oracle.GlobalTxnScope). + readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil). 
newDataReaderBuilder(&mockPhysicalIndexReader{e: innerDS}) if err != nil { return nil, err diff --git a/executor/brie.go b/executor/brie.go index 3cd1ce7ea9e1e..6dd1d9053a4ac 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -249,7 +249,6 @@ func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) return nil } default: - break } if tidbCfg.Store != "tikv" { @@ -464,11 +463,13 @@ func (gs *tidbGlueSession) CreateSession(store kv.Storage) (glue.Session, error) // such as BACKUP and RESTORE have already been privilege checked. // NOTE: Maybe drain the restult too? See `gluetidb.tidbSession.ExecuteInternal` for more details. func (gs *tidbGlueSession) Execute(ctx context.Context, sql string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) _, _, err := gs.se.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, sql) return err } func (gs *tidbGlueSession) ExecuteInternal(ctx context.Context, sql string, args ...interface{}) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBR) exec := gs.se.(sqlexec.SQLExecutor) _, err := exec.ExecuteInternal(ctx, sql, args...) return err diff --git a/executor/builder.go b/executor/builder.go index db0acf8eb02aa..da3ba39fb2622 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -17,9 +17,7 @@ package executor import ( "bytes" "context" - "fmt" "math" - "sort" "strconv" "strings" "sync" @@ -30,8 +28,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/diagnosticspb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor/aggfuncs" @@ -49,7 +49,6 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/legacy" "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/store/helper" @@ -70,6 +69,8 @@ import ( "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" "github.com/tikv/client-go/v2/tikv" + "github.com/tikv/client-go/v2/txnkv/txnsnapshot" + "golang.org/x/exp/slices" ) var ( @@ -88,16 +89,14 @@ var ( // executorBuilder builds an Executor from a Plan. // The InfoSchema must not change during execution. type executorBuilder struct { - ctx sessionctx.Context - is infoschema.InfoSchema - snapshotTS uint64 // The ts for snapshot-read. A select statement without for update will use this ts - forUpdateTS uint64 // The ts should be used by insert/update/delete/select-for-update statement - snapshotTSCached bool - err error // err is set when there is error happened during Executor building process. - hasLock bool - Ti *TelemetryInfo + ctx sessionctx.Context + is infoschema.InfoSchema + err error // err is set when there is error happened during Executor building process. + hasLock bool + Ti *TelemetryInfo // isStaleness means whether this statement use stale read. 
isStaleness bool + txnScope string readReplicaScope string inUpdateStmt bool inDeleteStmt bool @@ -120,27 +119,16 @@ type CTEStorages struct { IterInTbl cteutil.Storage } -func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, replicaReadScope string) *executorBuilder { - b := &executorBuilder{ +func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *executorBuilder { + txnManager := sessiontxn.GetTxnManager(ctx) + return &executorBuilder{ ctx: ctx, is: is, Ti: ti, isStaleness: staleread.IsStmtStaleness(ctx), - readReplicaScope: replicaReadScope, - } - - txnManager := sessiontxn.GetTxnManager(ctx) - if provider, ok := txnManager.GetContextProvider().(*legacy.SimpleTxnContextProvider); ok { - provider.GetReadTSFunc = b.getReadTS - provider.GetForUpdateTSFunc = func() (uint64, error) { - if b.forUpdateTS != 0 { - return b.forUpdateTS, nil - } - return b.getReadTS() - } + txnScope: txnManager.GetTxnScope(), + readReplicaScope: txnManager.GetReadReplicaScope(), } - - return b } // MockPhysicalPlan is used to return a specified executor in when build. @@ -157,9 +145,9 @@ type MockExecutorBuilder struct { } // NewMockExecutorBuilderForTest is ONLY used in test. -func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, replicaReadScope string) *MockExecutorBuilder { +func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *MockExecutorBuilder { return &MockExecutorBuilder{ - executorBuilder: newExecutorBuilder(ctx, is, ti, replicaReadScope)} + executorBuilder: newExecutorBuilder(ctx, is, ti)} } // Build builds an executor tree according to `p`. @@ -363,13 +351,14 @@ func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { b.err = err return nil } - txn, err := e.ctx.Txn(true) + + session, err := e.getSysSession() if err != nil { b.err = err return nil } - - ddlInfo, err := ddl.GetDDLInfo(txn) + ddlInfo, err := ddl.GetDDLInfoWithNewTxn(session) + e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) if err != nil { b.err = err return nil @@ -657,9 +646,7 @@ func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) Executor defer func() { b.inSelectLockStmt = false }() } b.hasLock = true - - // Build 'select for update' using the 'for update' ts. - if b.forUpdateTS, b.err = b.getSnapshotTS(); b.err != nil { + if b.err = b.updateForUpdateTS(); b.err != nil { return nil } @@ -746,41 +733,9 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { outputNames: v.OutputNames(), } - failpoint.Inject("assertStaleReadValuesSameWithExecuteAndBuilder", func() { - // This fail point is used to assert the behavior after refactoring is exactly the same with the previous implement. - // Some variables in `plannercore.Execute` is deprecated and only be used for asserting now. 
- if b.isStaleness != v.IsStaleness { - panic(fmt.Sprintf("%v != %v", b.isStaleness, v.IsStaleness)) - } - - if b.readReplicaScope != v.ReadReplicaScope { - panic(fmt.Sprintf("%s != %s", b.readReplicaScope, v.ReadReplicaScope)) - } - - if v.SnapshotTS != 0 { - is, err := domain.GetDomain(b.ctx).GetSnapshotInfoSchema(v.SnapshotTS) - if err != nil { - panic(err) - } - - if b.is.SchemaMetaVersion() != is.SchemaMetaVersion() { - panic(fmt.Sprintf("%d != %d", b.is.SchemaMetaVersion(), is.SchemaMetaVersion())) - } - - ts, err := sessiontxn.GetTxnManager(b.ctx).GetStmtReadTS() - if err != nil { - panic(e) - } - - if v.SnapshotTS != ts { - panic(fmt.Sprintf("%d != %d", ts, v.SnapshotTS)) - } - } - }) - failpoint.Inject("assertExecutePrepareStatementStalenessOption", func(val failpoint.Value) { vs := strings.Split(val.(string), "_") - assertTS, assertTxnScope := vs[0], vs[1] + assertTS, assertReadReplicaScope := vs[0], vs[1] staleread.AssertStmtStaleness(b.ctx, true) ts, err := sessiontxn.GetTxnManager(b.ctx).GetStmtReadTS() if err != nil { @@ -788,7 +743,7 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { } if strconv.FormatUint(ts, 10) != assertTS || - assertTxnScope != b.readReplicaScope { + assertReadReplicaScope != b.readReplicaScope { panic("execute prepare statement have wrong staleness option") } }) @@ -865,8 +820,7 @@ func (b *executorBuilder) buildSetConfig(v *plannercore.SetConfig) Executor { func (b *executorBuilder) buildInsert(v *plannercore.Insert) Executor { b.inInsertStmt = true - - if b.forUpdateTS, b.err = b.getSnapshotTS(); b.err != nil { + if b.err = b.updateForUpdateTS(); b.err != nil { return nil } @@ -1029,6 +983,12 @@ func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) Executor { } func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor { + switch v.Statement.(type) { + case *ast.AlterTableStmt: + if len(v.Statement.(*ast.AlterTableStmt).Specs) > 1 && b.Ti != nil { + b.Ti.UseMultiSchemaChange = true + } + } e := &DDLExec{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), stmt: v.Statement, @@ -1581,42 +1541,37 @@ func (b *executorBuilder) getSnapshotTS() (uint64, error) { return txnManager.GetStmtReadTS() } -// getReadTS returns the ts used by select (without for-update clause). The return value is affected by the isolation level -// and some stale/historical read contexts. For example, it will return txn.StartTS in RR and return -// the current timestamp in RC isolation -func (b *executorBuilder) getReadTS() (uint64, error) { - failpoint.Inject("assertNotStaleReadForExecutorGetReadTS", func() { - // after refactoring stale read will use its own context provider - staleread.AssertStmtStaleness(b.ctx, false) - }) - - if b.snapshotTSCached { - return b.snapshotTS, nil - } - - if snapshotTS := b.ctx.GetSessionVars().SnapshotTS; snapshotTS != 0 { - b.snapshotTS = snapshotTS - b.snapshotTSCached = true - return snapshotTS, nil - } +// getSnapshot get the appropriate snapshot from txnManager and set +// the relevant snapshot options before return. +func (b *executorBuilder) getSnapshot() (kv.Snapshot, error) { + var snapshot kv.Snapshot + var err error - if b.snapshotTS != 0 { - b.snapshotTSCached = true - // Return the cached value. 
- return b.snapshotTS, nil + txnManager := sessiontxn.GetTxnManager(b.ctx) + if b.inInsertStmt || b.inUpdateStmt || b.inDeleteStmt || b.inSelectLockStmt { + snapshot, err = txnManager.GetSnapshotWithStmtForUpdateTS() + } else { + snapshot, err = txnManager.GetSnapshotWithStmtReadTS() } - - txn, err := b.ctx.Txn(true) if err != nil { - return 0, err + return nil, err } - b.snapshotTS = txn.StartTS() - if b.snapshotTS == 0 { - return 0, errors.Trace(ErrGetStartTS) + sessVars := b.ctx.GetSessionVars() + replicaReadType := sessVars.GetReplicaRead() + snapshot.SetOption(kv.ReadReplicaScope, b.readReplicaScope) + snapshot.SetOption(kv.TaskID, sessVars.StmtCtx.TaskID) + + if replicaReadType.IsClosestRead() && b.readReplicaScope != kv.GlobalTxnScope { + snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ + { + Key: placement.DCLabelKey, + Value: b.readReplicaScope, + }, + }) } - b.snapshotTSCached = true - return b.snapshotTS, nil + + return snapshot, nil } func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executor { @@ -2119,8 +2074,7 @@ func (b *executorBuilder) buildUpdate(v *plannercore.Update) Executor { } } } - - if b.forUpdateTS, b.err = b.getSnapshotTS(); b.err != nil { + if b.err = b.updateForUpdateTS(); b.err != nil { return nil } @@ -2178,7 +2132,7 @@ func (b *executorBuilder) buildDelete(v *plannercore.Delete) Executor { tblID2table[info.TblID], _ = b.is.TableByID(info.TblID) } - if b.forUpdateTS, b.err = b.getSnapshotTS(); b.err != nil { + if b.err = b.updateForUpdateTS(); b.err != nil { return nil } @@ -2197,6 +2151,12 @@ func (b *executorBuilder) buildDelete(v *plannercore.Delete) Executor { return deleteExec } +func (b *executorBuilder) updateForUpdateTS() error { + // GetStmtForUpdateTS will auto update the for update ts if it is necessary + _, err := sessiontxn.GetTxnManager(b.ctx).GetStmtForUpdateTS() + return err +} + func (b *executorBuilder) buildAnalyzeIndexPushdown(task plannercore.AnalyzeIndexTask, opts map[ast.AnalyzeOptionType]uint64, autoAnalyze string) *analyzeTask { job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: autoAnalyze + "analyze index " + task.IndexInfo.Name.O} _, offset := timeutil.Zone(b.ctx.GetSessionVars().Location()) @@ -2493,7 +2453,8 @@ func (b *executorBuilder) getApproximateTableCountFromStorage(sctx sessionctx.Co if task.PartitionName != "" { sqlexec.MustFormatSQL(sql, " partition(%n)", task.PartitionName) } - rows, _, err := b.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(context.TODO(), nil, sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := b.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(ctx, nil, sql.String()) if err != nil { return 0, false } @@ -3159,6 +3120,7 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, table: tbl, @@ -3290,7 +3252,9 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E } // Sort the partition is necessary to make the final multiple partition key ranges ordered. 
- sort.Sort(partitionSlice(partitions)) + slices.SortFunc(partitions, func(i, j table.PhysicalTable) bool { + return i.GetPhysicalID() < j.GetPhysicalID() + }) ret.kvRangeBuilder = kvRangeBuilderFromRangeAndPartition{ sctx: b.ctx, partitions: partitions, @@ -3412,7 +3376,9 @@ func (builder *dataReaderBuilder) prunePartitionForInnerExecutor(tbl table.Table } // To make the final key ranges involving multiple partitions ordered. - sort.Sort(partitionSlice(usedPartition)) + slices.SortFunc(usedPartition, func(i, j table.PhysicalTable) bool { + return i.GetPhysicalID() < j.GetPhysicalID() + }) return usedPartition, true, contentPos, nil } @@ -3439,6 +3405,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, physicalTableID: physicalTableID, @@ -3989,8 +3956,8 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte } } // The key ranges should be ordered. - sort.Slice(kvRanges, func(i, j int) bool { - return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0 + slices.SortFunc(kvRanges, func(i, j kv.KeyRange) bool { + return bytes.Compare(i.StartKey, j.StartKey) < 0 }) return builder.buildTableReaderFromKvRanges(ctx, e, kvRanges) } @@ -4024,8 +3991,8 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte } // The key ranges should be ordered. - sort.Slice(kvRanges, func(i, j int) bool { - return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0 + slices.SortFunc(kvRanges, func(i, j kv.KeyRange) bool { + return bytes.Compare(i.StartKey, j.StartKey) < 0 }) return builder.buildTableReaderFromKvRanges(ctx, e, kvRanges) } @@ -4055,21 +4022,6 @@ type kvRangeBuilderFromRangeAndPartition struct { partitions []table.PhysicalTable } -// partitionSlice implement the sort interface. -type partitionSlice []table.PhysicalTable - -func (s partitionSlice) Len() int { - return len(s) -} - -func (s partitionSlice) Less(i, j int) bool { - return s[i].GetPhysicalID() < s[j].GetPhysicalID() -} - -func (s partitionSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - func (h kvRangeBuilderFromRangeAndPartition) buildKeyRangeSeparately(ranges []*ranger.Range) ([]int64, [][]kv.KeyRange, error) { ret := make([][]kv.KeyRange, 0, len(h.partitions)) pids := make([]int64, 0, len(h.partitions)) @@ -4111,6 +4063,7 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T SetStartTS(startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
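Editor's note: the hunks above drop the hand-rolled partitionSlice sort.Interface and rewrite the sort.Sort/sort.Slice call sites with slices.SortFunc from golang.org/x/exp/slices. Below is a minimal, self-contained sketch of the same migration, assuming the pre-Go 1.21 x/exp API used in this patch (a less function returning bool) and a toy partition type standing in for table.PhysicalTable.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// partition is a stand-in for table.PhysicalTable; only the physical ID
// matters for the ordering shown in the hunks above.
type partition struct {
	physicalID int64
	name       string
}

func main() {
	parts := []partition{
		{physicalID: 52, name: "p2"},
		{physicalID: 48, name: "p0"},
		{physicalID: 50, name: "p1"},
	}

	// Same idea as the patch: replace a hand-written sort.Interface
	// (Len/Less/Swap on partitionSlice) with a one-line generic comparator.
	slices.SortFunc(parts, func(i, j partition) bool {
		return i.physicalID < j.physicalID
	})

	for _, p := range parts {
		fmt.Println(p.physicalID, p.name)
	}
}
```

The generic comparator removes the Len/Less/Swap boilerplate while keeping the ordering by physical table ID that the downstream key-range builder relies on.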
@@ -4131,8 +4084,8 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T func (builder *dataReaderBuilder) buildTableReaderFromHandles(ctx context.Context, e *TableReaderExecutor, handles []kv.Handle, canReorderHandles bool) (*TableReaderExecutor, error) { if canReorderHandles { - sort.Slice(handles, func(i, j int) bool { - return handles[i].Compare(handles[j]) < 0 + slices.SortFunc(handles, func(i, j kv.Handle) bool { + return i.Compare(j) < 0 }) } var b distsql.RequestBuilder @@ -4377,8 +4330,8 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges))) } if cwc == nil { - sort.Slice(kvRanges, func(i, j int) bool { - return bytes.Compare(kvRanges[i].StartKey, kvRanges[j].StartKey) < 0 + slices.SortFunc(kvRanges, func(i, j kv.KeyRange) bool { + return bytes.Compare(i.StartKey, j.StartKey) < 0 }) return kvRanges, nil } @@ -4635,7 +4588,8 @@ func NewRowDecoder(ctx sessionctx.Context, schema *expression.Schema, tbl *model } func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan) Executor { - if err := b.validCanReadTemporaryOrCacheTable(plan.TblInfo); err != nil { + var err error + if err = b.validCanReadTemporaryOrCacheTable(plan.TblInfo); err != nil { b.err = err return nil } @@ -4647,34 +4601,53 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan }() } - snapshotTS, err := b.getSnapshotTS() + decoder := NewRowDecoder(b.ctx, plan.Schema(), plan.TblInfo) + e := &BatchPointGetExec{ + baseExecutor: newBaseExecutor(b.ctx, plan.Schema(), plan.ID()), + tblInfo: plan.TblInfo, + idxInfo: plan.IndexInfo, + rowDecoder: decoder, + keepOrder: plan.KeepOrder, + desc: plan.Desc, + lock: plan.Lock, + waitTime: plan.LockWaitTime, + partExpr: plan.PartitionExpr, + partPos: plan.PartitionColPos, + singlePart: plan.SinglePart, + partTblID: plan.PartTblID, + columns: plan.Columns, + } + + e.snapshot, err = b.getSnapshot() if err != nil { b.err = err return nil } - - decoder := NewRowDecoder(b.ctx, plan.Schema(), plan.TblInfo) - e := &BatchPointGetExec{ - baseExecutor: newBaseExecutor(b.ctx, plan.Schema(), plan.ID()), - tblInfo: plan.TblInfo, - idxInfo: plan.IndexInfo, - rowDecoder: decoder, - snapshotTS: snapshotTS, - readReplicaScope: b.readReplicaScope, - isStaleness: b.isStaleness, - keepOrder: plan.KeepOrder, - desc: plan.Desc, - lock: plan.Lock, - waitTime: plan.LockWaitTime, - partExpr: plan.PartitionExpr, - partPos: plan.PartitionColPos, - singlePart: plan.SinglePart, - partTblID: plan.PartTblID, - columns: plan.Columns, + if e.runtimeStats != nil { + snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} + e.stats = &runtimeStatsWithSnapshot{ + SnapshotRuntimeStats: snapshotStats, + } + e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) + b.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } + failpoint.Inject("assertBatchPointReplicaOption", func(val failpoint.Value) { + assertScope := val.(string) + if e.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != b.readReplicaScope { + panic("batch point get replica option fail") + } + }) + + snapshotTS, err := b.getSnapshotTS() + if err != nil { + b.err = err + return nil + } if plan.TblInfo.TableCacheStatusType == model.TableCacheStatusEnable { - e.cacheTable = b.getCacheTable(plan.TblInfo, snapshotTS) + if cacheTable := b.getCacheTable(plan.TblInfo, snapshotTS); 
cacheTable != nil { + e.snapshot = cacheTableSnapshot{e.snapshot, cacheTable} + } } if plan.TblInfo.TempTableType != model.TempTableNone { diff --git a/executor/compiler.go b/executor/compiler.go index eccc3b2418776..f4633c85f70c6 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -91,16 +91,15 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm lowerPriority = needLowerPriority(finalPlan) } return &ExecStmt{ - GoCtx: ctx, - ReplicaReadScope: ret.ReadReplicaScope, - InfoSchema: is, - Plan: finalPlan, - LowerPriority: lowerPriority, - Text: stmtNode.Text(), - StmtNode: stmtNode, - Ctx: c.Ctx, - OutputNames: names, - Ti: &TelemetryInfo{}, + GoCtx: ctx, + InfoSchema: is, + Plan: finalPlan, + LowerPriority: lowerPriority, + Text: stmtNode.Text(), + StmtNode: stmtNode, + Ctx: c.Ctx, + OutputNames: names, + Ti: &TelemetryInfo{}, }, nil } diff --git a/executor/copr_cache_test.go b/executor/copr_cache_test.go index 78bef9bed05cd..ba99c1894d864 100644 --- a/executor/copr_cache_test.go +++ b/executor/copr_cache_test.go @@ -53,9 +53,6 @@ func TestIntegrationCopCache(t *testing.T) { tk.MustExec("use test") tk.MustExec("create table t (a int primary key)") - // TODO(tiancaiamao) update the test and support cop cache for paging. - tk.MustExec("set @@tidb_enable_paging = off") - tblInfo, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) require.NoError(t, err) tid := tblInfo.Meta().ID diff --git a/executor/coprocessor.go b/executor/coprocessor.go index 5df6528d72301..93a23dd6829ca 100644 --- a/executor/coprocessor.go +++ b/executor/coprocessor.go @@ -32,7 +32,6 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" ) // CoprocessorDAGHandler uses to handle cop dag request. @@ -170,7 +169,7 @@ func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (Exec } plan = core.InjectExtraProjection(plan) // Build executor. 
- b := newExecutorBuilder(h.sctx, is, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(h.sctx, is, nil) return b.build(plan), nil } diff --git a/executor/cte_test.go b/executor/cte_test.go index 5f68f140fed5e..50db46e4bc17f 100644 --- a/executor/cte_test.go +++ b/executor/cte_test.go @@ -17,7 +17,6 @@ package executor_test import ( "fmt" "math/rand" - "sort" "testing" "github.com/pingcap/failpoint" @@ -26,6 +25,7 @@ import ( "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) func TestBasicCTE(t *testing.T) { @@ -403,7 +403,7 @@ func TestSpillToDisk(t *testing.T) { require.Greater(t, memTracker.MaxConsumed(), int64(0)) require.Greater(t, diskTracker.MaxConsumed(), int64(0)) - sort.Ints(vals) + slices.Sort(vals) resRows := make([]string, 0, rowNum) for i := vals[0]; i <= rowNum; i++ { resRows = append(resRows, fmt.Sprintf("%d", i)) diff --git a/executor/ddl.go b/executor/ddl.go index 1553be2299fd7..2887033b78f23 100644 --- a/executor/ddl.go +++ b/executor/ddl.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -37,7 +38,6 @@ import ( "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/logutil" - "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" ) @@ -89,6 +89,7 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { } e.done = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) // For each DDL, we should commit the previous transaction and create a new transaction. 
// Following cases are exceptions var localTempTablesToDrop []*ast.TableName @@ -133,7 +134,10 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { return err } - defer func() { e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = false }() + defer func() { + e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = false + e.ctx.GetSessionVars().StmtCtx.DDLJobID = 0 + }() switch x := e.stmt.(type) { case *ast.AlterDatabaseStmt: @@ -220,30 +224,12 @@ func (e *DDLExec) executeTruncateTable(s *ast.TruncateTableStmt) error { } func (e *DDLExec) executeRenameTable(s *ast.RenameTableStmt) error { - isAlterTable := false - var err error - if len(s.TableToTables) == 1 { - oldIdent := ast.Ident{Schema: s.TableToTables[0].OldTable.Schema, Name: s.TableToTables[0].OldTable.Name} - if _, ok := e.getLocalTemporaryTable(oldIdent.Schema, oldIdent.Name); ok { + for _, tables := range s.TableToTables { + if _, ok := e.getLocalTemporaryTable(tables.OldTable.Schema, tables.OldTable.Name); ok { return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("RENAME TABLE") } - newIdent := ast.Ident{Schema: s.TableToTables[0].NewTable.Schema, Name: s.TableToTables[0].NewTable.Name} - err = domain.GetDomain(e.ctx).DDL().RenameTable(e.ctx, oldIdent, newIdent, isAlterTable) - } else { - oldIdents := make([]ast.Ident, 0, len(s.TableToTables)) - newIdents := make([]ast.Ident, 0, len(s.TableToTables)) - for _, tables := range s.TableToTables { - oldIdent := ast.Ident{Schema: tables.OldTable.Schema, Name: tables.OldTable.Name} - if _, ok := e.getLocalTemporaryTable(oldIdent.Schema, oldIdent.Name); ok { - return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("RENAME TABLE") - } - newIdent := ast.Ident{Schema: tables.NewTable.Schema, Name: tables.NewTable.Name} - oldIdents = append(oldIdents, oldIdent) - newIdents = append(newIdents, newIdent) - } - err = domain.GetDomain(e.ctx).DDL().RenameTables(e.ctx, oldIdents, newIdents, isAlterTable) } - return err + return domain.GetDomain(e.ctx).DDL().RenameTable(e.ctx, s) } func (e *DDLExec) executeCreateDatabase(s *ast.CreateDatabaseStmt) error { @@ -300,14 +286,11 @@ func (e *DDLExec) executeCreateView(s *ast.CreateViewStmt) error { } func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error { - ident := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} - if _, ok := e.getLocalTemporaryTable(ident.Schema, ident.Name); ok { + if _, ok := e.getLocalTemporaryTable(s.Table.Schema, s.Table.Name); ok { return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("CREATE INDEX") } - err := domain.GetDomain(e.ctx).DDL().CreateIndex(e.ctx, ident, s.KeyType, model.NewCIStr(s.IndexName), - s.IndexPartSpecifications, s.IndexOption, s.IfNotExists) - return err + return domain.GetDomain(e.ctx).DDL().CreateIndex(e.ctx, s) } func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error { @@ -335,117 +318,16 @@ func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error { return err } -// If one drop those tables by mistake, it's difficult to recover. -// In the worst case, the whole TiDB cluster fails to bootstrap, so we prevent user from dropping them. 
-var systemTables = map[string]struct{}{ - "tidb": {}, - "gc_delete_range": {}, - "gc_delete_range_done": {}, -} - -func isSystemTable(schema, table string) bool { - if schema != "mysql" { - return false - } - if _, ok := systemTables[table]; ok { - return true - } - return false -} - -type objectType int - -const ( - tableObject objectType = iota - viewObject - sequenceObject -) - func (e *DDLExec) executeDropTable(s *ast.DropTableStmt) error { - return e.dropTableObject(s.Tables, tableObject, s.IfExists) + return domain.GetDomain(e.ctx).DDL().DropTable(e.ctx, s) } func (e *DDLExec) executeDropView(s *ast.DropTableStmt) error { - return e.dropTableObject(s.Tables, viewObject, s.IfExists) + return domain.GetDomain(e.ctx).DDL().DropView(e.ctx, s) } func (e *DDLExec) executeDropSequence(s *ast.DropSequenceStmt) error { - return e.dropTableObject(s.Sequences, sequenceObject, s.IfExists) -} - -// dropTableObject actually applies to `tableObject`, `viewObject` and `sequenceObject`. -func (e *DDLExec) dropTableObject(objects []*ast.TableName, obt objectType, ifExists bool) error { - var notExistTables []string - sessVars := e.ctx.GetSessionVars() - for _, tn := range objects { - fullti := ast.Ident{Schema: tn.Schema, Name: tn.Name} - _, ok := e.is.SchemaByName(tn.Schema) - if !ok { - // TODO: we should return special error for table not exist, checking "not exist" is not enough, - // because some other errors may contain this error string too. - notExistTables = append(notExistTables, fullti.String()) - continue - } - _, err := e.is.TableByName(tn.Schema, tn.Name) - if err != nil && infoschema.ErrTableNotExists.Equal(err) { - notExistTables = append(notExistTables, fullti.String()) - continue - } else if err != nil { - return err - } - - // Protect important system table from been dropped by a mistake. - // I can hardly find a case that a user really need to do this. - if isSystemTable(tn.Schema.L, tn.Name.L) { - return errors.Errorf("Drop tidb system table '%s.%s' is forbidden", tn.Schema.L, tn.Name.L) - } - tableInfo, err := e.is.TableByName(tn.Schema, tn.Name) - if err != nil { - return err - } - tempTableType := tableInfo.Meta().TempTableType - if obt == tableObject && config.CheckTableBeforeDrop && tempTableType == model.TempTableNone { - logutil.BgLogger().Warn("admin check table before drop", - zap.String("database", fullti.Schema.O), - zap.String("table", fullti.Name.O), - ) - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - _, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "admin check table %n.%n", fullti.Schema.O, fullti.Name.O) - if err != nil { - return err - } - } - switch obt { - case tableObject: - err = domain.GetDomain(e.ctx).DDL().DropTable(e.ctx, fullti) - case viewObject: - err = domain.GetDomain(e.ctx).DDL().DropView(e.ctx, fullti) - case sequenceObject: - err = domain.GetDomain(e.ctx).DDL().DropSequence(e.ctx, fullti, ifExists) - } - if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) { - notExistTables = append(notExistTables, fullti.String()) - } else if err != nil { - return err - } - } - if len(notExistTables) > 0 && !ifExists { - if obt == sequenceObject { - return infoschema.ErrSequenceDropExists.GenWithStackByArgs(strings.Join(notExistTables, ",")) - } - return infoschema.ErrTableDropExists.GenWithStackByArgs(strings.Join(notExistTables, ",")) - } - // We need add warning when use if exists. 
- if len(notExistTables) > 0 && ifExists { - for _, table := range notExistTables { - if obt == sequenceObject { - sessVars.StmtCtx.AppendNote(infoschema.ErrSequenceDropExists.GenWithStackByArgs(table)) - } else { - sessVars.StmtCtx.AppendNote(infoschema.ErrTableDropExists.GenWithStackByArgs(table)) - } - } - } - return nil + return domain.GetDomain(e.ctx).DDL().DropSequence(e.ctx, s) } func (e *DDLExec) dropLocalTemporaryTables(localTempTables []*ast.TableName) error { @@ -464,26 +346,19 @@ func (e *DDLExec) dropLocalTemporaryTables(localTempTables []*ast.TableName) err } func (e *DDLExec) executeDropIndex(s *ast.DropIndexStmt) error { - ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} - if _, ok := e.getLocalTemporaryTable(ti.Schema, ti.Name); ok { + if _, ok := e.getLocalTemporaryTable(s.Table.Schema, s.Table.Name); ok { return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("DROP INDEX") } - err := domain.GetDomain(e.ctx).DDL().DropIndex(e.ctx, ti, model.NewCIStr(s.IndexName), s.IfExists) - if (infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err)) && s.IfExists { - err = nil - } - return err + return domain.GetDomain(e.ctx).DDL().DropIndex(e.ctx, s) } func (e *DDLExec) executeAlterTable(ctx context.Context, s *ast.AlterTableStmt) error { - ti := ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name} - if _, ok := e.getLocalTemporaryTable(ti.Schema, ti.Name); ok { + if _, ok := e.getLocalTemporaryTable(s.Table.Schema, s.Table.Name); ok { return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("ALTER TABLE") } - err := domain.GetDomain(e.ctx).DDL().AlterTable(ctx, e.ctx, ti, s.Specs) - return err + return domain.GetDomain(e.ctx).DDL().AlterTable(ctx, e.ctx, s) } // executeRecoverTable represents a recover table executor. @@ -536,7 +411,8 @@ func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, dom *domain.Do if err != nil { return nil, nil, err } - defer e.releaseSysSession(se) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + defer e.releaseSysSession(ctx, se) job, err := ddl.GetHistoryJobByID(se, s.JobID) if err != nil { return nil, nil, err diff --git a/executor/ddl_test.go b/executor/ddl_test.go index bf9da41257e04..5abca41820d1c 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -880,7 +880,8 @@ func TestShardRowIDBits(t *testing.T) { tblInfo.ShardRowIDBits = 5 tblInfo.MaxShardRowIDBits = 5 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) _, err = m.GenSchemaVersion() require.NoError(t, err) diff --git a/executor/distsql.go b/executor/distsql.go index d20e11ae65df4..c843bfa9ca586 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -171,6 +171,7 @@ type IndexReaderExecutor struct { kvRanges []kv.KeyRange dagPB *tipb.DAGRequest startTS uint64 + txnScope string readReplicaScope string isStaleness bool // result returns one or more distsql.PartialResult and each PartialResult is returned by one region. @@ -308,6 +309,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
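Editor's note: in the buildBatchPointGet hunk further up, the executor now obtains its snapshot from the transaction manager and, for a cached table, wraps it as cacheTableSnapshot{e.snapshot, cacheTable}. The sketch below shows only the decorator shape; the getter interface and storeSnapshot type are toy stand-ins rather than TiDB's kv.Snapshot API, and the cache-first fallback is an assumption for illustration.

```go
package main

import "fmt"

// getter is a toy stand-in for the subset of a snapshot used here.
type getter interface {
	Get(key string) (string, bool)
}

type storeSnapshot map[string]string

func (s storeSnapshot) Get(key string) (string, bool) {
	v, ok := s[key]
	return v, ok
}

// cachedSnapshot mirrors the idea of cacheTableSnapshot: serve reads from an
// in-memory cache first and fall back to the wrapped snapshot otherwise.
type cachedSnapshot struct {
	getter
	cache map[string]string
}

func (c cachedSnapshot) Get(key string) (string, bool) {
	if v, ok := c.cache[key]; ok {
		return v, true
	}
	return c.getter.Get(key)
}

func main() {
	snap := getter(storeSnapshot{"k1": "from-store"})
	// Same shape as `e.snapshot = cacheTableSnapshot{e.snapshot, cacheTable}`:
	// the wrapped value still satisfies the original interface.
	snap = cachedSnapshot{getter: snap, cache: map[string]string{"k2": "from-cache"}}

	fmt.Println(snap.Get("k1"))
	fmt.Println(snap.Get("k2"))
}
```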
@@ -582,6 +584,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan< SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetPaging(e.indexPaging). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). @@ -680,6 +683,7 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookup table: table, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, columns: e.columns, diff --git a/executor/executor.go b/executor/executor.go index 9bb2cd5789d65..7d2839cb29e34 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -68,7 +68,6 @@ import ( topsqlstate "github.com/pingcap/tidb/util/topsql/state" tikverr "github.com/tikv/client-go/v2/error" tikvstore "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/oracle" tikvutil "github.com/tikv/client-go/v2/util" atomicutil "go.uber.org/atomic" "go.uber.org/zap" @@ -341,7 +340,8 @@ type CancelDDLJobsExec struct { // Open implements the Executor Open interface. func (e *CancelDDLJobsExec) Open(ctx context.Context) error { // We want to use a global transaction to execute the admin command, so we don't use e.ctx here. - errInTxn := kv.RunInNewTxn(context.Background(), e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) (err error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) + errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) (err error) { e.errs, err = ddl.CancelJobs(txn, e.jobIDs) return }) @@ -597,11 +597,12 @@ func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error { if err != nil { return err } - jobs, err := ddl.GetAllDDLJobs(meta.NewMeta(txn)) + m := meta.NewMeta(txn) + jobs, err := ddl.GetAllDDLJobs(m) if err != nil { return err } - historyJobs, err := ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs) if err != nil { return err } @@ -1298,7 +1299,7 @@ func init() { ctx = opentracing.ContextWithSpan(ctx, span1) } - e := newExecutorBuilder(sctx, is, nil, oracle.GlobalTxnScope) + e := newExecutorBuilder(sctx, is, nil) exec := e.build(p) if e.err != nil { return nil, e.err @@ -1922,11 +1923,6 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.NotFillCache = !opts.SQLCache } sc.WeakConsistency = isWeakConsistencyRead(ctx, stmt) - // Try to mark the `RCCheckTS` flag for the first time execution of in-transaction read requests - // using read-consistency isolation level. - if NeedSetRCCheckTSFlag(ctx, stmt) { - sc.RCCheckTS = true - } case *ast.SetOprStmt: sc.InSelectStmt = true sc.OverflowAsWarning = true @@ -2065,13 +2061,3 @@ func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool { return sessionVars.ConnectionID > 0 && sessionVars.ReadConsistency.IsWeak() && plannercore.IsAutoCommitTxn(ctx) && plannercore.IsReadOnly(node, sessionVars) } - -// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx. 
-func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool { - sessionVars := ctx.GetSessionVars() - if sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() && - sessionVars.IsPessimisticReadConsistency() && !sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) { - return true - } - return false -} diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index 8ee1224a8fe6a..d378185361f00 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -38,7 +38,6 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" - "github.com/tikv/client-go/v2/oracle" ) type requiredRowsDataSource struct { @@ -846,7 +845,7 @@ func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, i j.CompareFuncs = append(j.CompareFuncs, expression.GetCmpFunction(nil, j.LeftJoinKeys[i], j.RightJoinKeys[i])) } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) return b.build(j) } diff --git a/executor/executor_test.go b/executor/executor_test.go index c8e4304f1c22c..3d68a859c6dff 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -3470,10 +3470,10 @@ func TestUnreasonablyClose(t *testing.T) { err = sessiontxn.NewTxn(context.Background(), tk.Session()) require.NoError(t, err, comment) - err = sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO()) + err = sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO(), stmt) require.NoError(t, err, comment) - executorBuilder := executor.NewMockExecutorBuilderForTest(tk.Session(), is, nil, oracle.GlobalTxnScope) + executorBuilder := executor.NewMockExecutorBuilderForTest(tk.Session(), is, nil) p, _, _ := planner.Optimize(context.TODO(), tk.Session(), stmt, is) require.NotNil(t, p) @@ -5417,7 +5417,8 @@ func TestHistoryReadInTxn(t *testing.T) { // After `ExecRestrictedSQL` with a specified snapshot and use current session, the original snapshot ts should not be reset // See issue: https://github.com/pingcap/tidb/issues/34529 exec := tk.Session().(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(ts2), sqlexec.ExecOptionUseCurSession}, "select * from his_t0 where id=1") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(ts2), sqlexec.ExecOptionUseCurSession}, "select * from his_t0 where id=1") require.NoError(t, err) require.Equal(t, 1, len(rows)) require.Equal(t, int64(1), rows[0].GetInt64(0)) @@ -5622,9 +5623,10 @@ func TestAdmin(t *testing.T) { require.NoError(t, err) row = req.GetRow(0) require.Equal(t, 6, row.Len()) - txn, err := store.Begin() - require.NoError(t, err) - ddlInfo, err := ddl.GetDDLInfo(txn) + tk = testkit.NewTestKit(t, store) + tk.MustExec("begin") + sess := tk.Session() + ddlInfo, err := ddl.GetDDLInfo(sess) require.NoError(t, err) require.Equal(t, ddlInfo.SchemaVer, row.GetInt64(0)) // TODO: Pass this test. 
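Editor's note: several hunks above tag internal work with kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) before calling kv.RunInNewTxn, so the request source of internal transactions can be attributed separately from user traffic. Below is a minimal sketch of the underlying context-tagging idea, using a plain context.WithValue key and hypothetical helpers as stand-ins for TiDB's kv package.

```go
package main

import (
	"context"
	"fmt"
)

// sourceKey is a private context key; kv.WithInternalSourceType plays this
// role in the hunks above, with richer request-source bookkeeping.
type sourceKey struct{}

func withInternalSource(ctx context.Context, source string) context.Context {
	return context.WithValue(ctx, sourceKey{}, source)
}

func internalSource(ctx context.Context) string {
	if s, ok := ctx.Value(sourceKey{}).(string); ok {
		return s
	}
	return "unknown"
}

// runInNewTxn is a stand-in for kv.RunInNewTxn: every internal transaction
// carries a source label so it can be told apart from user statements.
func runInNewTxn(ctx context.Context, fn func(ctx context.Context) error) error {
	fmt.Println("starting internal txn, source =", internalSource(ctx))
	return fn(ctx)
}

func main() {
	ctx := withInternalSource(context.Background(), "ddl")
	_ = runInNewTxn(ctx, func(ctx context.Context) error {
		// internal work goes here
		return nil
	})
}
```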
@@ -5639,8 +5641,7 @@ func TestAdmin(t *testing.T) { err = r.Next(ctx, req) require.NoError(t, err) require.Zero(t, req.NumRows()) - err = txn.Rollback() - require.NoError(t, err) + tk.MustExec("rollback") // show DDL jobs test r, err = tk.Exec("admin show ddl jobs") @@ -5650,9 +5651,9 @@ func TestAdmin(t *testing.T) { require.NoError(t, err) row = req.GetRow(0) require.Equal(t, 12, row.Len()) - txn, err = store.Begin() + txn, err := store.Begin() require.NoError(t, err) - historyJobs, err := ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), ddl.DefNumHistoryJobs) require.Greater(t, len(historyJobs), 1) require.Greater(t, len(row.GetString(1)), 0) require.NoError(t, err) @@ -5677,7 +5678,7 @@ func TestAdmin(t *testing.T) { result.Check(testkit.Rows()) result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`) result.Check(testkit.Rows()) - historyJobs, err = ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err = ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), ddl.DefNumHistoryJobs) result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID)) result.Check(testkit.Rows(historyJobs[0].Query)) require.NoError(t, err) @@ -5741,7 +5742,7 @@ func TestAdmin(t *testing.T) { // Test for reverse scan get history ddl jobs when ddl history jobs queue has multiple regions. txn, err = store.Begin() require.NoError(t, err) - historyJobs, err = ddl.GetHistoryDDLJobs(txn, 20) + historyJobs, err = ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), 20) require.NoError(t, err) // Split region for history ddl job queues. @@ -5750,7 +5751,7 @@ func TestAdmin(t *testing.T) { endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID) cluster.SplitKeys(startKey, endKey, int(historyJobs[0].ID/5)) - historyJobs2, err := ddl.GetHistoryDDLJobs(txn, 20) + historyJobs2, err := ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), 20) require.NoError(t, err) require.Equal(t, historyJobs2, historyJobs) } diff --git a/executor/grant.go b/executor/grant.go index 99db32abe79d1..b0f29e586fb7a 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -67,6 +68,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.done = true + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName := e.Level.DBName if len(dbName) == 0 { @@ -132,15 +134,15 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } defer func() { if !isCommit { - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "rollback") + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "rollback") if err != nil { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalSession) + e.releaseSysSession(internalCtx, internalSession) }() - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "begin") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") if err != nil { return err } @@ -166,7 +168,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { if user.AuthOpt != nil && user.AuthOpt.AuthPlugin != "" { authPlugin = 
user.AuthOpt.AuthPlugin } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, `INSERT INTO %n.%n (Host, User, authentication_string, plugin) VALUES (%?, %?, %?, %?);`, mysql.SystemDB, mysql.UserTable, user.User.Hostname, user.User.Username, pwd, authPlugin) if err != nil { @@ -234,7 +236,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } } - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "commit") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "commit") if err != nil { return err } @@ -322,31 +324,36 @@ func (e *GrantExec) checkAndInitColumnPriv(user string, host string, cols []*ast } // initGlobalPrivEntry inserts a new row into mysql.DB with empty privilege. -func initGlobalPrivEntry(ctx sessionctx.Context, user string, host string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, PRIV) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.GlobalPrivTable, host, user, "{}") +func initGlobalPrivEntry(sctx sessionctx.Context, user string, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, PRIV) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.GlobalPrivTable, host, user, "{}") return err } // initDBPrivEntry inserts a new row into mysql.DB with empty privilege. -func initDBPrivEntry(ctx sessionctx.Context, user string, host string, db string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.DBTable, host, user, db) +func initDBPrivEntry(sctx sessionctx.Context, user string, host string, db string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB) VALUES (%?, %?, %?)`, mysql.SystemDB, mysql.DBTable, host, user, db) return err } // initTablePrivEntry inserts a new row into mysql.Tables_priv with empty privilege. -func initTablePrivEntry(ctx sessionctx.Context, user string, host string, db string, tbl string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB, Table_name, Table_priv, Column_priv) VALUES (%?, %?, %?, %?, '', '')`, mysql.SystemDB, mysql.TablePrivTable, host, user, db, tbl) +func initTablePrivEntry(sctx sessionctx.Context, user string, host string, db string, tbl string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB, Table_name, Table_priv, Column_priv) VALUES (%?, %?, %?, %?, '', '')`, mysql.SystemDB, mysql.TablePrivTable, host, user, db, tbl) return err } // initColumnPrivEntry inserts a new row into mysql.Columns_priv with empty privilege. 
-func initColumnPrivEntry(ctx sessionctx.Context, user string, host string, db string, tbl string, col string) error { - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `INSERT INTO %n.%n (Host, User, DB, Table_name, Column_name, Column_priv) VALUES (%?, %?, %?, %?, %?, '')`, mysql.SystemDB, mysql.ColumnPrivTable, host, user, db, tbl, col) +func initColumnPrivEntry(sctx sessionctx.Context, user string, host string, db string, tbl string, col string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, DB, Table_name, Column_name, Column_priv) VALUES (%?, %?, %?, %?, %?, '')`, mysql.SystemDB, mysql.ColumnPrivTable, host, user, db, tbl, col) return err } // grantGlobalPriv grants priv to user in global scope. -func (e *GrantExec) grantGlobalPriv(ctx sessionctx.Context, user *ast.UserSpec) error { +func (e *GrantExec) grantGlobalPriv(sctx sessionctx.Context, user *ast.UserSpec) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if len(e.TLSOptions) == 0 { return nil } @@ -354,7 +361,7 @@ func (e *GrantExec) grantGlobalPriv(ctx sessionctx.Context, user *ast.UserSpec) if err != nil { return errors.Trace(err) } - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `UPDATE %n.%n SET PRIV=%? WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.GlobalPrivTable, priv, user.User.Username, user.User.Hostname) + _, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `UPDATE %n.%n SET PRIV=%? WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.GlobalPrivTable, priv, user.User.Username, user.User.Hostname) return err } @@ -473,7 +480,8 @@ func (e *GrantExec) grantDynamicPriv(privName string, user *ast.UserSpec, intern if e.WithGrant { grantOption = "Y" } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `REPLACE INTO %n.global_grants (user,host,priv,with_grant_option) VALUES (%?, %?, %?, %?)`, mysql.SystemDB, user.User.Username, user.User.Hostname, privName, grantOption) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `REPLACE INTO %n.global_grants (user,host,priv,with_grant_option) VALUES (%?, %?, %?, %?)`, mysql.SystemDB, user.User.Username, user.User.Hostname, privName, grantOption) return err } @@ -491,7 +499,8 @@ func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, int } sqlexec.MustFormatSQL(sql, ` WHERE User=%? AND Host=%?`, user.User.Username, user.User.Hostname) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -519,7 +528,8 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? 
AND DB=%?", user.User.Username, user.User.Hostname, dbName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -542,7 +552,8 @@ func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, inte } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user.User.Username, user.User.Hostname, dbName, tblName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } @@ -567,7 +578,8 @@ func (e *GrantExec) grantColumnLevel(priv *ast.PrivElem, user *ast.UserSpec, int } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user.User.Username, user.User.Hostname, dbName, tbl.Meta().Name.O, col.Name.O) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err } @@ -663,12 +675,13 @@ func composeColumnPrivUpdateForGrant(ctx sessionctx.Context, sql *strings.Builde } // recordExists is a helper function to check if the sql returns any row. -func recordExists(ctx sessionctx.Context, sql string, args ...interface{}) (bool, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql, args...) +func recordExists(sctx sessionctx.Context, sql string, args ...interface{}) (bool, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, args...) if err != nil { return false, err } - rows, _, err := getRowsAndFields(ctx, rs) + rows, _, err := getRowsAndFields(sctx, rs) if err != nil { return false, err } @@ -697,13 +710,14 @@ func columnPrivEntryExists(ctx sessionctx.Context, name string, host string, db // getTablePriv gets current table scope privilege set from mysql.Tables_priv. // Return Table_priv and Column_priv. -func getTablePriv(ctx sessionctx.Context, name string, host string, db string, tbl string) (string, string, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `SELECT Table_priv, Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl) +func getTablePriv(sctx sessionctx.Context, name string, host string, db string, tbl string) (string, string, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `SELECT Table_priv, Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? 
AND Table_name=%?`, mysql.SystemDB, mysql.TablePrivTable, name, host, db, tbl) if err != nil { return "", "", err } var tPriv, cPriv string - rows, fields, err := getRowsAndFields(ctx, rs) + rows, fields, err := getRowsAndFields(sctx, rs) if err != nil { return "", "", errors.Errorf("get table privilege fail for %s %s %s %s: %v", name, host, db, tbl, err) } @@ -724,12 +738,13 @@ func getTablePriv(ctx sessionctx.Context, name string, host string, db string, t // getColumnPriv gets current column scope privilege set from mysql.Columns_priv. // Return Column_priv. -func getColumnPriv(ctx sessionctx.Context, name string, host string, db string, tbl string, col string) (string, error) { - rs, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), `SELECT Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?;`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col) +func getColumnPriv(sctx sessionctx.Context, name string, host string, db string, tbl string, col string) (string, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `SELECT Column_priv FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?;`, mysql.SystemDB, mysql.ColumnPrivTable, name, host, db, tbl, col) if err != nil { return "", err } - rows, fields, err := getRowsAndFields(ctx, rs) + rows, fields, err := getRowsAndFields(sctx, rs) if err != nil { return "", errors.Errorf("get column privilege fail for %s %s %s %s: %s", name, host, db, tbl, err) } @@ -764,11 +779,12 @@ func getTargetSchemaAndTable(ctx sessionctx.Context, dbName, tableName string, i } // getRowsAndFields is used to extract rows from record sets. 
-func getRowsAndFields(ctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*ast.ResultField, error) { +func getRowsAndFields(sctx sessionctx.Context, rs sqlexec.RecordSet) ([]chunk.Row, []*ast.ResultField, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if rs == nil { return nil, nil, errors.Errorf("nil recordset") } - rows, err := getRowFromRecordSet(context.Background(), ctx, rs) + rows, err := getRowFromRecordSet(ctx, sctx, rs) if err != nil { return nil, nil, err } diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index b33fccf970af9..87ab4514e52f5 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -18,7 +18,6 @@ import ( "bytes" "context" "runtime/trace" - "sort" "strconv" "sync" "sync/atomic" @@ -43,6 +42,7 @@ import ( "github.com/pingcap/tidb/util/mvmap" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "golang.org/x/exp/slices" ) var _ Executor = &IndexLookUpJoin{} @@ -644,12 +644,12 @@ func (iw *innerWorker) sortAndDedupLookUpContents(lookUpContents []*indexJoinLoo return lookUpContents } sc := iw.ctx.GetSessionVars().StmtCtx - sort.Slice(lookUpContents, func(i, j int) bool { - cmp := compareRow(sc, lookUpContents[i].keys, lookUpContents[j].keys, iw.keyCollators) + slices.SortFunc(lookUpContents, func(i, j *indexJoinLookUpContent) bool { + cmp := compareRow(sc, i.keys, j.keys, iw.keyCollators) if cmp != 0 || iw.nextColCompareFilters == nil { return cmp < 0 } - return iw.nextColCompareFilters.CompareRow(lookUpContents[i].row, lookUpContents[j].row) < 0 + return iw.nextColCompareFilters.CompareRow(i.row, j.row) < 0 }) deDupedLookupKeys := lookUpContents[:1] for i := 1; i < len(lookUpContents); i++ { diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go index d0ebd5fae5eb1..25cec9a52ee95 100644 --- a/executor/index_lookup_merge_join.go +++ b/executor/index_lookup_merge_join.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "runtime/trace" - "sort" "sync" "sync/atomic" @@ -37,6 +36,7 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // IndexLookUpMergeJoin realizes IndexLookUpJoin by merge join @@ -449,8 +449,7 @@ func (imw *innerMergeWorker) handleTask(ctx context.Context, task *lookUpMergeJo // Because the necessary condition of merge join is both outer and inner keep order of join keys. // In this case, we need sort the outer side. if imw.outerMergeCtx.needOuterSort { - sort.Slice(task.outerOrderIdx, func(i, j int) bool { - idxI, idxJ := task.outerOrderIdx[i], task.outerOrderIdx[j] + slices.SortFunc(task.outerOrderIdx, func(idxI, idxJ chunk.RowPtr) bool { rowI, rowJ := task.outerResult.GetRow(idxI), task.outerResult.GetRow(idxJ) var cmp int64 var err error diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index e3b8e6391f2f5..3ae11e92f484b 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -304,6 +304,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context, SetStartTS(e.startTS). SetDesc(e.descs[workID]). SetKeepOrder(false). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
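Editor's note: the index_lookup_join.go hunk above rewrites sortAndDedupLookUpContents to order the lookup keys with slices.SortFunc and then drop adjacent duplicates in place. A self-contained sketch of that sort-then-compact pattern follows, with a toy lookUpContent type and a single integer key standing in for the real multi-column comparison, using the same pre-Go 1.21 slices.SortFunc signature as earlier.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// lookUpContent is a toy stand-in for indexJoinLookUpContent: only the join
// key matters for ordering and deduplication here.
type lookUpContent struct {
	key int
	row string
}

// sortAndDedup mirrors the shape of sortAndDedupLookUpContents: order the
// lookup keys first, then keep one entry per distinct key by compacting
// adjacent duplicates in place.
func sortAndDedup(contents []lookUpContent) []lookUpContent {
	if len(contents) < 2 {
		return contents
	}
	slices.SortFunc(contents, func(i, j lookUpContent) bool {
		return i.key < j.key
	})
	deduped := contents[:1]
	for i := 1; i < len(contents); i++ {
		if contents[i].key != deduped[len(deduped)-1].key {
			deduped = append(deduped, contents[i])
		}
	}
	return deduped
}

func main() {
	contents := []lookUpContent{{3, "c"}, {1, "a"}, {3, "c2"}, {2, "b"}}
	// Which of the duplicate key-3 rows survives is unspecified, since the
	// sort is not stable; the set of distinct keys is deterministic.
	for _, c := range sortAndDedup(contents) {
		fmt.Println(c.key, c.row)
	}
}
```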
@@ -383,6 +384,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context, baseExecutor: newBaseExecutor(e.ctx, ts.Schema(), e.getPartitalPlanID(workID)), dagPB: e.dagPBs[workID], startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, feedback: statistics.NewQueryFeedback(0, nil, 0, false), @@ -603,6 +605,7 @@ func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tb table: tbl, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, columns: e.columns, diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 7f7766eda2b23..4ead26ba4b1d6 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "net/http" - "sort" "strconv" "strings" "sync" @@ -74,6 +73,7 @@ import ( "github.com/pingcap/tidb/util/stringutil" "github.com/tikv/client-go/v2/txnkv/txnlock" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type memtableRetriever struct { @@ -100,7 +100,7 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex if !e.initialized { is := sctx.GetInfoSchema().(infoschema.InfoSchema) dbs := is.AllSchemas() - sort.Sort(infoschema.SchemasSorter(dbs)) + slices.SortFunc(dbs, model.LessDBInfo) var err error switch e.table.Name.O { case infoschema.TableSchemata: @@ -289,6 +289,7 @@ func (c *statsCache) get(ctx context.Context, sctx sessionctx.Context) (map[int6 } c.mu.RUnlock() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) c.mu.Lock() defer c.mu.Unlock() if time.Since(c.modifyTime) < TableStatsCacheExpiry { @@ -690,8 +691,9 @@ func (e *hugeMemTableRetriever) dataForColumnsInTable(ctx context.Context, sctx _, ok := e.viewSchemaMap[tbl.ID] if !ok { var viewLogicalPlan plannercore.Plan + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) // Build plan is not thread safe, there will be concurrency on sessionctx. - if err := runWithSystemSession(sctx, func(s sessionctx.Context) error { + if err := runWithSystemSession(internalCtx, sctx, func(s sessionctx.Context) error { planBuilder, _ := plannercore.NewPlanBuilder().Init(s, is, &hint.BlockHintProcessor{}) var err error viewLogicalPlan, err = planBuilder.BuildDataSourceFromView(ctx, schema.Name, tbl) @@ -1419,7 +1421,7 @@ func (e *memtableRetriever) setDataForMetricTables(ctx sessionctx.Context) { for name := range infoschema.MetricTableMap { tables = append(tables, name) } - sort.Strings(tables) + slices.Sort(tables) rows := make([][]types.Datum, 0, len(tables)) for _, name := range tables { schema := infoschema.MetricTableMap[name] @@ -1906,7 +1908,8 @@ func dataForAnalyzeStatusHelper(sctx sessionctx.Context) (rows [][]types.Datum, const maxAnalyzeJobs = 30 const sql = "SELECT table_schema, table_name, partition_name, job_info, processed_rows, CONVERT_TZ(start_time, @@TIME_ZONE, '+00:00'), CONVERT_TZ(end_time, @@TIME_ZONE, '+00:00'), state, fail_reason, instance, process_id FROM mysql.analyze_jobs ORDER BY update_time DESC LIMIT %?" 
exec := sctx.(sqlexec.RestrictedSQLExecutor) - chunkRows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, sql, maxAnalyzeJobs) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + chunkRows, _, err := exec.ExecRestrictedSQL(ctx, nil, sql, maxAnalyzeJobs) if err != nil { return nil, err } @@ -2718,7 +2721,9 @@ func (e *hugeMemTableRetriever) retrieve(ctx context.Context, sctx sessionctx.Co if !e.initialized { is := sctx.GetInfoSchema().(infoschema.InfoSchema) dbs := is.AllSchemas() - sort.Sort(infoschema.SchemasSorter(dbs)) + slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { + return i.Name.L < j.Name.L + }) e.dbs = dbs e.initialized = true e.rows = make([][]types.Datum, 0, 1024) diff --git a/executor/inspection_common.go b/executor/inspection_common.go index c3622c115efe6..f30e8d26e3413 100644 --- a/executor/inspection_common.go +++ b/executor/inspection_common.go @@ -16,11 +16,11 @@ package executor import ( "context" - "sort" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" + "golang.org/x/exp/slices" ) type inspectionRuleRetriever struct { @@ -61,7 +61,7 @@ func (e *inspectionRuleRetriever) retrieve(ctx context.Context, sctx sessionctx. for rule := range inspectionSummaryRules { summaryRules = append(summaryRules, rule) } - sort.Strings(summaryRules) + slices.Sort(summaryRules) for _, rule := range summaryRules { finalRows = append(finalRows, types.MakeDatums( diff --git a/executor/inspection_profile.go b/executor/inspection_profile.go index 15885010dce25..a46bee924bb2c 100644 --- a/executor/inspection_profile.go +++ b/executor/inspection_profile.go @@ -24,6 +24,7 @@ import ( "time" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/sqlexec" ) @@ -167,7 +168,8 @@ func (n *metricNode) getLabelValue(label string) *metricValue { func (n *metricNode) queryRowsByLabel(pb *profileBuilder, query string, handleRowFn func(label string, v float64)) error { exec := pb.sctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, query) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, query) if err != nil { return err } diff --git a/executor/inspection_result.go b/executor/inspection_result.go index 741508c5cf88f..78443b29c4e07 100644 --- a/executor/inspection_result.go +++ b/executor/inspection_result.go @@ -18,13 +18,13 @@ import ( "context" "fmt" "math" - "sort" "strconv" "strings" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/sqlexec" + "golang.org/x/exp/slices" ) type ( @@ -117,6 +118,7 @@ func (e *inspectionResultRetriever) retrieve(ctx context.Context, sctx sessionct } e.retrieved = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) // Some data of cluster-level memory tables will be retrieved many times in different inspection rules, // and the cost of retrieving some data is expensive. We use the `TableSnapshot` to cache those data // and obtain them lazily, and provide a consistent view of inspection tables for each inspection rules. 
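Editor's note: the infoschema_reader.go hunk earlier threads the stats source type into statsCache.get, which refreshes its data under a double-checked lock: readers check the expiry under RLock, and the writer re-checks it after taking the write lock so only one goroutine reloads. The sketch below mirrors that refresh shape under stated assumptions; the load callback and field names are hypothetical, not the real stats query.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// statsCache mirrors the shape of the cache touched in the hunk above.
type statsCache struct {
	mu         sync.RWMutex
	modifyTime time.Time
	data       map[int64]uint64
}

const cacheExpiry = 3 * time.Second

func (c *statsCache) get(load func() map[int64]uint64) map[int64]uint64 {
	c.mu.RLock()
	if time.Since(c.modifyTime) < cacheExpiry {
		defer c.mu.RUnlock()
		return c.data
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	// Double check: another goroutine may have refreshed the cache while we
	// were waiting for the write lock.
	if time.Since(c.modifyTime) < cacheExpiry {
		return c.data
	}
	c.data = load()
	c.modifyTime = time.Now()
	return c.data
}

func main() {
	c := &statsCache{}
	loads := 0
	load := func() map[int64]uint64 {
		loads++
		return map[int64]uint64{1: 100}
	}
	c.get(load)
	c.get(load) // served from cache; load() runs only once
	fmt.Println("loads:", loads)
}
```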
@@ -166,20 +168,20 @@ func (e *inspectionResultRetriever) retrieve(ctx context.Context, sctx sessionct continue } // make result stable - sort.Slice(results, func(i, j int) bool { - if results[i].degree != results[j].degree { - return results[i].degree > results[j].degree + slices.SortFunc(results, func(i, j inspectionResult) bool { + if i.degree != j.degree { + return i.degree > j.degree } - if lhs, rhs := results[i].item, results[j].item; lhs != rhs { + if lhs, rhs := i.item, j.item; lhs != rhs { return lhs < rhs } - if results[i].actual != results[j].actual { - return results[i].actual < results[j].actual + if i.actual != j.actual { + return i.actual < j.actual } - if lhs, rhs := results[i].tp, results[j].tp; lhs != rhs { + if lhs, rhs := i.tp, j.tp; lhs != rhs { return lhs < rhs } - return results[i].instance < results[j].instance + return i.instance < j.instance }) for _, result := range results { if len(result.instance) == 0 { @@ -267,10 +269,10 @@ func (configInspection) inspectDiffConfig(ctx context.Context, sctx sessionctx.C } groups := make([]string, 0, len(m)) for k, v := range m { - sort.Strings(v) + slices.Sort(v) groups = append(groups, fmt.Sprintf("%s config value is %s", strings.Join(v, ","), k)) } - sort.Strings(groups) + slices.Sort(groups) return strings.Join(groups, "\n") } diff --git a/executor/inspection_summary.go b/executor/inspection_summary.go index ebd3f69abc4f8..30fc542a9898b 100644 --- a/executor/inspection_summary.go +++ b/executor/inspection_summary.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" @@ -417,6 +418,7 @@ func (e *inspectionSummaryRetriever) retrieve(ctx context.Context, sctx sessionc return nil, nil } e.retrieved = true + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) rules := inspectionFilter{set: e.extractor.Rules} names := inspectionFilter{set: e.extractor.MetricNames} diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index 080a7d5d83a4d..7bd2cf3b63be3 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "net/http" - "sort" "strings" "sync" "time" @@ -49,6 +48,7 @@ import ( "github.com/pingcap/tidb/util/pdapi" "github.com/pingcap/tidb/util/set" "go.uber.org/zap" + "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) @@ -252,7 +252,7 @@ func fetchClusterConfig(sctx sessionctx.Context, nodeTypes, nodeAddrs set.String } items = append(items, item{key: key, val: str}) } - sort.Slice(items, func(i, j int) bool { return items[i].key < items[j].key }) + slices.SortFunc(items, func(i, j item) bool { return i.key < j.key }) var rows [][]types.Datum for _, item := range items { rows = append(rows, types.MakeDatums( @@ -279,7 +279,7 @@ func fetchClusterConfig(sctx sessionctx.Context, nodeTypes, nodeAddrs set.String } results = append(results, result) } - sort.Slice(results, func(i, j int) bool { return results[i].idx < results[j].idx }) + slices.SortFunc(results, func(i, j result) bool { return i.idx < j.idx }) for _, result := range results { finalRows = append(finalRows, result.rows...) 
} @@ -357,7 +357,7 @@ func (e *clusterServerInfoRetriever) retrieve(ctx context.Context, sctx sessionc } results = append(results, result) } - sort.Slice(results, func(i, j int) bool { return results[i].idx < results[j].idx }) + slices.SortFunc(results, func(i, j result) bool { return i.idx < j.idx }) for _, result := range results { finalRows = append(finalRows, result.rows...) } diff --git a/executor/merge_join_test.go b/executor/merge_join_test.go index a140809fd46d2..2a46243c6e6cc 100644 --- a/executor/merge_join_test.go +++ b/executor/merge_join_test.go @@ -785,11 +785,11 @@ func TestVectorizedMergeJoin(t *testing.T) { )).Check(testkit.Rows( fmt.Sprintf(`MergeJoin 4150.01 root inner join, left key:test.%s.a, right key:test.%s.a`, t1, t2), fmt.Sprintf(`├─Sort(Build) 3320.01 root test.%s.a`, t2), - fmt.Sprintf(`│ └─TableReader 3320.01 root data:Selection`), + `│ └─TableReader 3320.01 root data:Selection`, fmt.Sprintf(`│ └─Selection 3320.01 cop[tikv] lt(test.%s.b, 5), not(isnull(test.%s.a))`, t2, t2), fmt.Sprintf(`│ └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t2), fmt.Sprintf(`└─Sort(Probe) 3330.00 root test.%s.a`, t1), - fmt.Sprintf(` └─TableReader 3330.00 root data:Selection`), + ` └─TableReader 3330.00 root data:Selection`, fmt.Sprintf(` └─Selection 3330.00 cop[tikv] gt(test.%s.b, 5), not(isnull(test.%s.a))`, t1, t1), fmt.Sprintf(` └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t1), )) @@ -797,10 +797,10 @@ func TestVectorizedMergeJoin(t *testing.T) { t1, t2, t1, t2, t1, t2, t1, t2, )).Check(testkit.Rows( fmt.Sprintf(`HashJoin 4150.01 root inner join, equal:[eq(test.%s.a, test.%s.a)]`, t1, t2), - fmt.Sprintf(`├─TableReader(Build) 3320.01 root data:Selection`), + `├─TableReader(Build) 3320.01 root data:Selection`, fmt.Sprintf(`│ └─Selection 3320.01 cop[tikv] lt(test.%s.b, 5), not(isnull(test.%s.a))`, t2, t2), fmt.Sprintf(`│ └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t2), - fmt.Sprintf(`└─TableReader(Probe) 3330.00 root data:Selection`), + `└─TableReader(Probe) 3330.00 root data:Selection`, fmt.Sprintf(` └─Selection 3330.00 cop[tikv] gt(test.%s.b, 5), not(isnull(test.%s.a))`, t1, t1), fmt.Sprintf(` └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t1), )) @@ -903,14 +903,14 @@ func TestVectorizedShuffleMergeJoin(t *testing.T) { tk.MustQuery(fmt.Sprintf("explain format = 'brief' select /*+ TIDB_SMJ(%s, %s) */ * from %s, %s where %s.a=%s.a and %s.b>5 and %s.b<5", t1, t2, t1, t2, t1, t2, t1, t2, )).Check(testkit.Rows( - fmt.Sprintf(`Shuffle 4150.01 root execution info: concurrency:4, data sources:[TableReader TableReader]`), + `Shuffle 4150.01 root execution info: concurrency:4, data sources:[TableReader TableReader]`, fmt.Sprintf(`└─MergeJoin 4150.01 root inner join, left key:test.%s.a, right key:test.%s.a`, t1, t2), fmt.Sprintf(` ├─Sort(Build) 3320.01 root test.%s.a`, t2), - fmt.Sprintf(` │ └─TableReader 3320.01 root data:Selection`), + ` │ └─TableReader 3320.01 root data:Selection`, fmt.Sprintf(` │ └─Selection 3320.01 cop[tikv] lt(test.%s.b, 5), not(isnull(test.%s.a))`, t2, t2), fmt.Sprintf(` │ └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t2), fmt.Sprintf(` └─Sort(Probe) 3330.00 root test.%s.a`, t1), - fmt.Sprintf(` └─TableReader 3330.00 root data:Selection`), + ` └─TableReader 3330.00 root data:Selection`, fmt.Sprintf(` └─Selection 3330.00 cop[tikv] gt(test.%s.b, 5), not(isnull(test.%s.a))`, t1, t1), fmt.Sprintf(` └─TableFullScan 
10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t1), )) @@ -918,10 +918,10 @@ func TestVectorizedShuffleMergeJoin(t *testing.T) { t1, t2, t1, t2, t1, t2, t1, t2, )).Check(testkit.Rows( fmt.Sprintf(`HashJoin 4150.01 root inner join, equal:[eq(test.%s.a, test.%s.a)]`, t1, t2), - fmt.Sprintf(`├─TableReader(Build) 3320.01 root data:Selection`), + `├─TableReader(Build) 3320.01 root data:Selection`, fmt.Sprintf(`│ └─Selection 3320.01 cop[tikv] lt(test.%s.b, 5), not(isnull(test.%s.a))`, t2, t2), fmt.Sprintf(`│ └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t2), - fmt.Sprintf(`└─TableReader(Probe) 3330.00 root data:Selection`), + `└─TableReader(Probe) 3330.00 root data:Selection`, fmt.Sprintf(` └─Selection 3330.00 cop[tikv] gt(test.%s.b, 5), not(isnull(test.%s.a))`, t1, t1), fmt.Sprintf(` └─TableFullScan 10000.00 cop[tikv] table:%s keep order:false, stats:pseudo`, t1), )) diff --git a/executor/metrics_reader.go b/executor/metrics_reader.go index 3e90897d03192..2a5d552fcf530 100644 --- a/executor/metrics_reader.go +++ b/executor/metrics_reader.go @@ -18,7 +18,6 @@ import ( "context" "fmt" "math" - "sort" "strings" "time" @@ -26,6 +25,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" @@ -36,6 +36,7 @@ import ( "github.com/prometheus/client_golang/api" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" pmodel "github.com/prometheus/common/model" + "golang.org/x/exp/slices" ) const promReadTimeout = time.Second * 10 @@ -202,8 +203,9 @@ func (e *MetricsSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx. 
for name := range infoschema.MetricTableMap { tables = append(tables, name) } - sort.Strings(tables) + slices.Sort(tables) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) filter := inspectionFilter{set: e.extractor.MetricsNames} condition := e.timeRange.Condition() for _, name := range tables { @@ -278,8 +280,9 @@ func (e *MetricsSummaryByLabelRetriever) retrieve(ctx context.Context, sctx sess for name := range infoschema.MetricTableMap { tables = append(tables, name) } - sort.Strings(tables) + slices.Sort(tables) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) filter := inspectionFilter{set: e.extractor.MetricsNames} condition := e.timeRange.Condition() for _, name := range tables { diff --git a/executor/mpp_gather.go b/executor/mpp_gather.go index a9a6032d1f779..42526774dbdd5 100644 --- a/executor/mpp_gather.go +++ b/executor/mpp_gather.go @@ -77,7 +77,9 @@ func (e *MPPGather) appendMPPDispatchReq(pf *plannercore.Fragment) error { if err != nil { return errors.Trace(err) } - logutil.BgLogger().Info("Dispatch mpp task", zap.Uint64("timestamp", mppTask.StartTs), zap.Int64("ID", mppTask.ID), zap.String("address", mppTask.Meta.GetAddress()), zap.String("plan", plannercore.ToString(pf.ExchangeSender))) + logutil.BgLogger().Info("Dispatch mpp task", zap.Uint64("timestamp", mppTask.StartTs), + zap.Int64("ID", mppTask.ID), zap.String("address", mppTask.Meta.GetAddress()), + zap.String("plan", plannercore.ToString(pf.ExchangeSender))) req := &kv.MPPDispatchRequest{ Data: pbData, Meta: mppTask.Meta, diff --git a/executor/opt_rule_blacklist.go b/executor/opt_rule_blacklist.go index 5773f80efe7a2..f711d2feacb03 100644 --- a/executor/opt_rule_blacklist.go +++ b/executor/opt_rule_blacklist.go @@ -17,6 +17,7 @@ package executor import ( "context" + "github.com/pingcap/tidb/kv" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/chunk" @@ -31,13 +32,14 @@ type ReloadOptRuleBlacklistExec struct { // Next implements the Executor Next interface. func (e *ReloadOptRuleBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { - return LoadOptRuleBlacklist(e.ctx) + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + return LoadOptRuleBlacklist(internalCtx, e.ctx) } // LoadOptRuleBlacklist loads the latest data from table mysql.opt_rule_blacklist. 
-func LoadOptRuleBlacklist(ctx sessionctx.Context) (err error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select HIGH_PRIORITY name from mysql.opt_rule_blacklist") +func LoadOptRuleBlacklist(ctx context.Context, sctx sessionctx.Context) (err error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select HIGH_PRIORITY name from mysql.opt_rule_blacklist") if err != nil { return err } diff --git a/executor/plan_replayer.go b/executor/plan_replayer.go index 22520d5d3b722..99868e19d1d52 100644 --- a/executor/plan_replayer.go +++ b/executor/plan_replayer.go @@ -129,6 +129,7 @@ func (e *PlanReplayerSingleExec) dumpSingle(path string) (fileName string, err e // Generate key and create zip file time := time.Now().UnixNano() b := make([]byte, 16) + //nolint: gosec _, err = rand.Read(b) if err != nil { return "", err @@ -525,6 +526,7 @@ func loadVariables(ctx sessionctx.Context, z *zip.Reader) error { if err != nil { return errors.AddStack(err) } + //nolint: errcheck defer v.Close() _, err = toml.DecodeReader(v, &varMap) if err != nil { @@ -556,6 +558,7 @@ func createSchemaAndTables(ctx sessionctx.Context, f *zip.File) error { if err != nil { return errors.AddStack(err) } + //nolint: errcheck defer r.Close() buf := new(bytes.Buffer) _, err = buf.ReadFrom(r) @@ -589,6 +592,7 @@ func loadStats(ctx sessionctx.Context, f *zip.File) error { if err != nil { return errors.AddStack(err) } + //nolint: errcheck defer r.Close() buf := new(bytes.Buffer) _, err = buf.ReadFrom(r) diff --git a/executor/point_get.go b/executor/point_get.go index 1b4d6666663b5..04687403c0d4e 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -20,8 +20,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -44,7 +42,8 @@ import ( ) func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { - if err := b.validCanReadTemporaryOrCacheTable(p.TblInfo); err != nil { + var err error + if err = b.validCanReadTemporaryOrCacheTable(p.TblInfo); err != nil { b.err = err return nil } @@ -56,25 +55,48 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { }() } - snapshotTS, err := b.getSnapshotTS() - if err != nil { - b.err = err - return nil - } - e := &PointGetExecutor{ baseExecutor: newBaseExecutor(b.ctx, p.Schema(), p.ID()), + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, } - if p.TblInfo.TableCacheStatusType == model.TableCacheStatusEnable { - e.cacheTable = b.getCacheTable(p.TblInfo, snapshotTS) - } - e.base().initCap = 1 e.base().maxChunkSize = 1 - e.Init(p, snapshotTS) + e.Init(p) + + e.snapshot, err = b.getSnapshot() + if err != nil { + b.err = err + return nil + } + if e.runtimeStats != nil { + snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} + e.stats = &runtimeStatsWithSnapshot{ + SnapshotRuntimeStats: snapshotStats, + } + e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) + b.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } + + failpoint.Inject("assertPointReplicaOption", func(val failpoint.Value) { + assertScope := val.(string) + if e.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != e.readReplicaScope { + panic("point get replica option 
fail") + } + }) + + snapshotTS, err := b.getSnapshotTS() + if err != nil { + b.err = err + return nil + } + if p.TblInfo.TableCacheStatusType == model.TableCacheStatusEnable { + if cacheTable := b.getCacheTable(p.TblInfo, snapshotTS); cacheTable != nil { + e.snapshot = cacheTableSnapshot{e.snapshot, cacheTable} + } + } if e.lock { b.hasLock = true @@ -94,7 +116,7 @@ type PointGetExecutor struct { idxKey kv.Key handleVal []byte idxVals []types.Datum - snapshotTS uint64 + txnScope string readReplicaScope string isStaleness bool txn kv.Transaction @@ -112,18 +134,16 @@ type PointGetExecutor struct { // virtualColumnRetFieldTypes records the RetFieldTypes of virtual columns. virtualColumnRetFieldTypes []*types.FieldType - stats *runtimeStatsWithSnapshot - cacheTable kv.MemBuffer + stats *runtimeStatsWithSnapshot } // Init set fields needed for PointGetExecutor reuse, this does NOT change baseExecutor field -func (e *PointGetExecutor) Init(p *plannercore.PointGetPlan, snapshotTS uint64) { +func (e *PointGetExecutor) Init(p *plannercore.PointGetPlan) { decoder := NewRowDecoder(e.ctx, p.Schema(), p.TblInfo) e.tblInfo = p.TblInfo e.handle = p.Handle e.idxInfo = p.IndexInfo e.idxVals = p.IndexValues - e.snapshotTS = snapshotTS e.done = false if e.tblInfo.TempTableType == model.TempTableNone { e.lock = p.Lock @@ -152,56 +172,14 @@ func (e *PointGetExecutor) buildVirtualColumnInfo() { // Open implements the Executor interface. func (e *PointGetExecutor) Open(context.Context) error { - txnCtx := e.ctx.GetSessionVars().TxnCtx - snapshotTS := e.snapshotTS var err error e.txn, err = e.ctx.Txn(false) if err != nil { return err } - if e.txn.Valid() && txnCtx.StartTS == txnCtx.GetForUpdateTS() && txnCtx.StartTS == snapshotTS { - e.snapshot = e.txn.GetSnapshot() - } else { - e.snapshot = e.ctx.GetSnapshotWithTS(snapshotTS) - } - if e.ctx.GetSessionVars().StmtCtx.RCCheckTS { - e.snapshot.SetOption(kv.IsolationLevel, kv.RCCheckTS) - } - if e.cacheTable != nil { - e.snapshot = cacheTableSnapshot{e.snapshot, e.cacheTable} - } if err := e.verifyTxnScope(); err != nil { return err } - if e.runtimeStats != nil { - snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} - e.stats = &runtimeStatsWithSnapshot{ - SnapshotRuntimeStats: snapshotStats, - } - e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) - } - readReplicaType := e.ctx.GetSessionVars().GetReplicaRead() - if readReplicaType.IsFollowerRead() && !e.ctx.GetSessionVars().StmtCtx.RCCheckTS { - e.snapshot.SetOption(kv.ReplicaRead, readReplicaType) - } - e.snapshot.SetOption(kv.TaskID, e.ctx.GetSessionVars().StmtCtx.TaskID) - e.snapshot.SetOption(kv.ReadReplicaScope, e.readReplicaScope) - e.snapshot.SetOption(kv.IsStalenessReadOnly, e.isStaleness) - if readReplicaType.IsClosestRead() && e.readReplicaScope != kv.GlobalTxnScope { - e.snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ - { - Key: placement.DCLabelKey, - Value: e.readReplicaScope, - }, - }) - } - failpoint.Inject("assertPointReplicaOption", func(val failpoint.Value) { - assertScope := val.(string) - if readReplicaType.IsClosestRead() && assertScope != e.readReplicaScope { - panic("point get replica option fail") - } - }) setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, e.snapshot) return nil } @@ -457,15 +435,10 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) } func (e *PointGetExecutor) verifyTxnScope() error { - // Stale Read uses the calculated TSO for the read, - 
// so there is no need to check the TxnScope here. - if e.isStaleness { - return nil - } - txnScope := e.readReplicaScope - if txnScope == "" || txnScope == kv.GlobalTxnScope { + if e.txnScope == "" || e.txnScope == kv.GlobalTxnScope { return nil } + var tblID int64 var tblName string var partName string @@ -480,16 +453,16 @@ func (e *PointGetExecutor) verifyTxnScope() error { tblInfo, _ := is.TableByID(tblID) tblName = tblInfo.Meta().Name.String() } - valid := distsql.VerifyTxnScope(txnScope, tblID, is) + valid := distsql.VerifyTxnScope(e.txnScope, tblID, is) if valid { return nil } if len(partName) > 0 { return dbterror.ErrInvalidPlacementPolicyCheck.GenWithStackByArgs( - fmt.Sprintf("table %v's partition %v can not be read by %v txn_scope", tblName, partName, txnScope)) + fmt.Sprintf("table %v's partition %v can not be read by %v txn_scope", tblName, partName, e.txnScope)) } return dbterror.ErrInvalidPlacementPolicyCheck.GenWithStackByArgs( - fmt.Sprintf("table %v can not be read by %v txn_scope", tblName, txnScope)) + fmt.Sprintf("table %v can not be read by %v txn_scope", tblName, e.txnScope)) } // EncodeUniqueIndexKey encodes a unique index key. diff --git a/executor/prepared.go b/executor/prepared.go index e6395371c95d5..dcfe727d6b024 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -17,7 +17,6 @@ package executor import ( "context" "math" - "sort" "time" "github.com/pingcap/errors" @@ -32,7 +31,6 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" @@ -42,6 +40,7 @@ import ( "github.com/pingcap/tidb/util/topsql" topsqlstate "github.com/pingcap/tidb/util/topsql/state" "go.uber.org/zap" + "golang.org/x/exp/slices" ) var ( @@ -50,22 +49,6 @@ var ( _ Executor = &PrepareExec{} ) -type paramMarkerSorter struct { - markers []ast.ParamMarkerExpr -} - -func (p *paramMarkerSorter) Len() int { - return len(p.markers) -} - -func (p *paramMarkerSorter) Less(i, j int) bool { - return p.markers[i].(*driver.ParamMarkerExpr).Offset < p.markers[j].(*driver.ParamMarkerExpr).Offset -} - -func (p *paramMarkerSorter) Swap(i, j int) { - p.markers[i], p.markers[j] = p.markers[j], p.markers[i] -} - type paramMarkerExtractor struct { markers []ast.ParamMarkerExpr } @@ -184,16 +167,17 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { // The parameter markers are appended in visiting order, which may not // be the same as the position order in the query string. We need to // sort it by position. 
- sorter := &paramMarkerSorter{markers: extractor.markers} - sort.Sort(sorter) - e.ParamCount = len(sorter.markers) + slices.SortFunc(extractor.markers, func(i, j ast.ParamMarkerExpr) bool { + return i.(*driver.ParamMarkerExpr).Offset < j.(*driver.ParamMarkerExpr).Offset + }) + e.ParamCount = len(extractor.markers) for i := 0; i < e.ParamCount; i++ { - sorter.markers[i].SetOrder(i) + extractor.markers[i].SetOrder(i) } prepared := &ast.Prepared{ Stmt: stmt, StmtType: GetStmtLabel(stmt), - Params: sorter.markers, + Params: extractor.markers, SchemaVersion: ret.InfoSchema.SchemaMetaVersion(), } normalizedSQL, digest := parser.NormalizeDigest(prepared.Stmt.Text()) @@ -333,18 +317,11 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { // CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement. func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, - ID uint32, is infoschema.InfoSchema, snapshotTS uint64, replicaReadScope string, args []types.Datum) (*ExecStmt, bool, bool, error) { + execStmt *ast.ExecuteStmt, is infoschema.InfoSchema) (*ExecStmt, bool, bool, error) { startTime := time.Now() defer func() { sctx.GetSessionVars().DurationCompile = time.Since(startTime) }() - execStmt := &ast.ExecuteStmt{ExecID: ID} - if err := ResetContextOfStmt(sctx, execStmt); err != nil { - return nil, false, false, err - } - isStaleness := snapshotTS != 0 - sctx.GetSessionVars().StmtCtx.IsStaleness = isStaleness - execStmt.BinaryArgs = args execPlan, names, err := planner.Optimize(ctx, sctx, execStmt, is) if err != nil { return nil, false, false, err } @@ -353,23 +330,18 @@ func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, failpoint.Inject("assertTxnManagerInCompile", func() { sessiontxn.RecordAssert(sctx, "assertTxnManagerInCompile", true) sessiontxn.AssertTxnManagerInfoSchema(sctx, is) - staleread.AssertStmtStaleness(sctx, snapshotTS != 0) - if snapshotTS != 0 { - sessiontxn.AssertTxnManagerReadTS(sctx, snapshotTS) - } }) stmt := &ExecStmt{ - GoCtx: ctx, - InfoSchema: is, - Plan: execPlan, - StmtNode: execStmt, - Ctx: sctx, - OutputNames: names, - Ti: &TelemetryInfo{}, - ReplicaReadScope: replicaReadScope, + GoCtx: ctx, + InfoSchema: is, + Plan: execPlan, + StmtNode: execStmt, + Ctx: sctx, + OutputNames: names, + Ti: &TelemetryInfo{}, } - if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[ID]; ok { + if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[execStmt.ExecID]; ok { preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) if !ok { return nil, false, false, errors.Errorf("invalid CachedPrepareStmt type") } diff --git a/executor/reload_expr_pushdown_blacklist.go b/executor/reload_expr_pushdown_blacklist.go index 1511e7a280195..c32f84c957e1e 100644 --- a/executor/reload_expr_pushdown_blacklist.go +++ b/executor/reload_expr_pushdown_blacklist.go @@ -37,9 +37,10 @@ func (e *ReloadExprPushdownBlacklistExec) Next(ctx context.Context, _ *chunk.Chu } // LoadExprPushdownBlacklist loads the latest data from table mysql.expr_pushdown_blacklist.
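// Aside, not part of the patch: the internal-source tagging pattern that the
// surrounding hunks apply before running internal SQL. The constants
// (kv.InternalTxnSysVar, kv.InternalTxnPrivilege, ...) are the ones this series
// threads through; loadSomething is a hypothetical illustration, not existing code.
package executor

import (
	"context"

	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/sqlexec"
)

func loadSomething(sctx sessionctx.Context) error {
	// Tag the context so the restricted query is attributed to a known internal source.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar)
	exec := sctx.(sqlexec.RestrictedSQLExecutor)
	_, _, err := exec.ExecRestrictedSQL(ctx, nil, "select HIGH_PRIORITY name from mysql.expr_pushdown_blacklist")
	return err
}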
-func LoadExprPushdownBlacklist(ctx sessionctx.Context) (err error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select HIGH_PRIORITY name, store_type from mysql.expr_pushdown_blacklist") +func LoadExprPushdownBlacklist(sctx sessionctx.Context) (err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select HIGH_PRIORITY name, store_type from mysql.expr_pushdown_blacklist") if err != nil { return err } diff --git a/executor/revoke.go b/executor/revoke.go index 2165f6aaa50ff..36f7a36ac75de 100644 --- a/executor/revoke.go +++ b/executor/revoke.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -62,6 +63,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.done = true + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Commit the old transaction, like DDL. if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil { @@ -77,15 +79,15 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { } defer func() { if !isCommit { - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "rollback") + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "rollback") if err != nil { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalSession) + e.releaseSysSession(internalCtx, internalSession) }() - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "begin") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") if err != nil { return err } @@ -116,7 +118,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { } } - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "commit") + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "commit") if err != nil { return err } @@ -197,16 +199,18 @@ func (e *RevokeExec) revokeDynamicPriv(internalSession sessionctx.Context, privN if !privilege.GetPrivilegeManager(e.ctx).IsDynamicPrivilege(privName) { // for MySQL compatibility e.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrDynamicPrivilegeNotRegistered.GenWithStackByArgs(privName)) } - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "DELETE FROM mysql.global_grants WHERE user = %? AND host = %? AND priv = %?", user, host, privName) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "DELETE FROM mysql.global_grants WHERE user = %? AND host = %? 
AND priv = %?", user, host, privName) return err } func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) if priv.Priv == mysql.ExtendedPriv { return e.revokeDynamicPriv(internalSession, priv.Name, user, host) } if priv.Priv == mysql.AllPriv { // If ALL, also revoke dynamic privileges - _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), "DELETE FROM mysql.global_grants WHERE user = %? AND host = %?", user, host) + _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "DELETE FROM mysql.global_grants WHERE user = %? AND host = %?", user, host) if err != nil { return err } @@ -219,11 +223,12 @@ func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv * } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, strings.ToLower(host)) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast.PrivElem, userName, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName := e.Level.DBName if len(dbName) == 0 { dbName = e.ctx.GetSessionVars().CurrentDB @@ -237,11 +242,12 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%?", userName, host, dbName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) if err != nil && !terror.ErrorEqual(err, infoschema.ErrTableNotExists) { return err @@ -260,11 +266,12 @@ func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *a } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user, host, dbName, tblName) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err } func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) if err != nil { return err @@ -284,7 +291,7 @@ func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv * } sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? 
AND Column_name=%?", user, host, dbName, tbl.Meta().Name.O, col.Name.O) - _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) + _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err } diff --git a/executor/sample.go b/executor/sample.go index 4741cc8f87b2d..e7eb9bd223639 100644 --- a/executor/sample.go +++ b/executor/sample.go @@ -16,7 +16,6 @@ package executor import ( "context" - "sort" "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" @@ -30,6 +29,7 @@ import ( "github.com/pingcap/tidb/util/chunk" decoder "github.com/pingcap/tidb/util/rowDecoder" "github.com/tikv/client-go/v2/tikv" + "golang.org/x/exp/slices" ) var _ Executor = &TableSampleExecutor{} @@ -228,8 +228,8 @@ func splitIntoMultiRanges(store kv.Storage, startKey, endKey kv.Key) ([]kv.KeyRa } func sortRanges(ranges []kv.KeyRange, isDesc bool) { - sort.Slice(ranges, func(i, j int) bool { - ir, jr := ranges[i].StartKey, ranges[j].StartKey + slices.SortFunc(ranges, func(i, j kv.KeyRange) bool { + ir, jr := i.StartKey, j.StartKey if !isDesc { return ir.Cmp(jr) < 0 } diff --git a/executor/seqtest/BUILD.bazel b/executor/seqtest/BUILD.bazel index 3f0a4bfd1291d..c248e2e1fd30a 100644 --- a/executor/seqtest/BUILD.bazel +++ b/executor/seqtest/BUILD.bazel @@ -17,6 +17,7 @@ go_test( "//kv", "//meta/autoid", "//metrics", + "//parser/ast", "//parser/model", "//parser/mysql", "//parser/terror", diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go index b39f66d3030ee..4e1ff9ed02e4d 100644 --- a/executor/seqtest/prepared_test.go +++ b/executor/seqtest/prepared_test.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" @@ -157,9 +157,10 @@ func TestPrepared(t *testing.T) { require.NoError(t, err) tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows()) + execStmt := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: []types.Datum{types.NewDatum(1)}} // Check that ast.Statement created by executor.CompileExecutePreparedStmt has query text. 
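// Aside, not part of the patch: the call-site shape of
// executor.CompileExecutePreparedStmt before and after this change. The caller
// now builds the *ast.ExecuteStmt (carrying ExecID and BinaryArgs) up front,
// as the test below does, instead of passing the raw ID, snapshot TS,
// replica-read scope and argument list.
//
//	old: CompileExecutePreparedStmt(ctx, sctx, stmtID, is, snapshotTS, replicaReadScope, args)
//	new: CompileExecutePreparedStmt(ctx, sctx, &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: args}, is)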
- stmt, _, _, err := executor.CompileExecutePreparedStmt(context.TODO(), tk.Session(), stmtID, - tk.Session().GetInfoSchema().(infoschema.InfoSchema), 0, kv.GlobalReplicaScope, []types.Datum{types.NewDatum(1)}) + stmt, _, _, err := executor.CompileExecutePreparedStmt(context.TODO(), tk.Session(), execStmt, + tk.Session().GetInfoSchema().(infoschema.InfoSchema)) require.NoError(t, err) require.Equal(t, query, stmt.OriginText()) diff --git a/executor/set_test.go b/executor/set_test.go index 9a7213571fddc..f91d1f1e80388 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -679,7 +679,6 @@ func TestSetVar(t *testing.T) { tk.MustQuery("select @@global.tidb_enable_new_cost_interface").Check(testkit.Rows("0")) // default value is 0 tk.MustExec("set global tidb_enable_new_cost_interface=1") tk.MustQuery("select @@global.tidb_enable_new_cost_interface").Check(testkit.Rows("1")) - tk.MustQuery("show global variables like 'tidb_enable_new_cost_interface'").Check(testkit.Rows()) // hidden tk.MustExec("set global tidb_enable_new_cost_interface=0") tk.MustQuery("select @@global.tidb_enable_new_cost_interface").Check(testkit.Rows("0")) @@ -1835,3 +1834,45 @@ func TestGcMaxWaitTime(t *testing.T) { tk.MustExec("set global tidb_gc_life_time = \"72h\"") tk.MustExec("set global tidb_gc_max_wait_time = 1000") } + +func TestTiFlashFineGrainedShuffle(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + // Default is -1. + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + // Min val is -1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -2") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 0") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("0")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 1024") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("1024")) + // Max val is 1024. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 1025") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("1024")) + + // Default is 8192. + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("8192")) + + // Min is 1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = 0") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("1")) + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = -1") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("1")) + + // Max is uint64_max. + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = 18446744073709551615") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("18446744073709551615")) + + // Test set global. 
+ tk.MustExec("set global tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set global tiflash_fine_grained_shuffle_batch_size = 8192") +} diff --git a/executor/show.go b/executor/show.go index d1424244bdb47..1ea1b658cd1de 100644 --- a/executor/show.go +++ b/executor/show.go @@ -68,6 +68,7 @@ import ( "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/stringutil" + "golang.org/x/exp/slices" ) var etcdDialTimeout = 5 * time.Second @@ -354,6 +355,7 @@ func (e *ShowExec) fetchShowBind() error { func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBindInfo) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, fmt.Sprintf("SELECT count(*) FROM mysql.bind_info where status = '%s' or status = '%s';", bindinfo.Enabled, bindinfo.Using)) if err != nil { @@ -384,6 +386,7 @@ func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { } func (e *ShowExec) fetchShowEngines(ctx context.Context) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) exec := e.ctx.(sqlexec.RestrictedSQLExecutor) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT * FROM information_schema.engines`) @@ -411,7 +414,7 @@ func moveInfoSchemaToFront(dbs []string) { func (e *ShowExec) fetchShowDatabases() error { dbs := e.is.AllSchemaNames() checker := privilege.GetPrivilegeManager(e.ctx) - sort.Strings(dbs) + slices.Sort(dbs) var ( fieldPatternsLike collate.WildcardPattern fieldFilter string @@ -516,7 +519,7 @@ func (e *ShowExec) fetchShowTables() error { tableTypes[v.Meta().Name.O] = "BASE TABLE" } } - sort.Strings(tableNames) + slices.Sort(tableNames) for _, v := range tableNames { if e.Full { e.appendRow([]interface{}{v, tableTypes[v]}) @@ -539,6 +542,7 @@ func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { } exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) var snapshot uint64 txn, err := e.ctx.Txn(false) @@ -835,7 +839,7 @@ func (e *ShowExec) fetchShowVariables() (err error) { } else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) { continue } - if v.Hidden || e.sysVarHiddenForSem(v.Name) { + if e.sysVarHiddenForSem(v.Name) { continue } value, err = variable.GetGlobalSystemVar(sessionVars, v.Name) @@ -860,7 +864,7 @@ func (e *ShowExec) fetchShowVariables() (err error) { } else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) { continue } - if v.Hidden || e.sysVarHiddenForSem(v.Name) { + if e.sysVarHiddenForSem(v.Name) { continue } value, err = variable.GetSessionOrGlobalSystemVar(sessionVars, v.Name) @@ -1520,6 +1524,7 @@ func (e *ShowExec) fetchShowCreateUser(ctx context.Context) error { if checker == nil { return errors.New("miss privilege checker") } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) userName, hostName := e.User.Username, e.User.Hostname sessVars := e.ctx.GetSessionVars() @@ -2039,10 +2044,11 @@ func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is info if !tbl.IsView() { return nil } + ctx = kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) // We need to run the build plan process in another session because there may be // multiple goroutines running at the same time while session is not goroutine-safe. // Take joining system table as an example, `fetchBuildSideRows` and `fetchProbeSideChunks` can be run concurrently. 
- return runWithSystemSession(sctx, func(s sessionctx.Context) error { + return runWithSystemSession(ctx, sctx, func(s sessionctx.Context) error { // Retrieve view columns info. planBuilder, _ := plannercore.NewPlanBuilder().Init(s, is, &hint.BlockHintProcessor{}) if viewLogicalPlan, err := planBuilder.BuildDataSourceFromView(ctx, dbName, tbl); err == nil { @@ -2064,12 +2070,12 @@ func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is info }) } -func runWithSystemSession(sctx sessionctx.Context, fn func(sessionctx.Context) error) error { +func runWithSystemSession(ctx context.Context, sctx sessionctx.Context, fn func(sessionctx.Context) error) error { b := &baseExecutor{ctx: sctx} sysCtx, err := b.getSysSession() if err != nil { return err } - defer b.releaseSysSession(sysCtx) + defer b.releaseSysSession(ctx, sysCtx) return fn(sysCtx) } diff --git a/executor/show_placement.go b/executor/show_placement.go index acd6d9cccecfc..63e34bb246cea 100644 --- a/executor/show_placement.go +++ b/executor/show_placement.go @@ -18,7 +18,6 @@ import ( "context" gjson "encoding/json" "fmt" - "sort" "github.com/pingcap/errors" "github.com/pingcap/tidb/domain/infosync" @@ -33,6 +32,7 @@ import ( "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/sqlexec" + "golang.org/x/exp/slices" ) type showPlacementLabelsResultBuilder struct { @@ -101,7 +101,7 @@ func (b *showPlacementLabelsResultBuilder) sortMapKeys(m map[string]interface{}) sorted = append(sorted, key) } - sort.Strings(sorted) + slices.Sort(sorted) return sorted } @@ -251,7 +251,7 @@ func (e *ShowExec) fetchShowPlacement(ctx context.Context) error { func (e *ShowExec) fetchAllPlacementPolicies() error { policies := e.is.AllPlacementPolicies() - sort.Slice(policies, func(i, j int) bool { return policies[i].Name.O < policies[j].Name.O }) + slices.SortFunc(policies, func(i, j *model.PolicyInfo) bool { return i.Name.O < j.Name.O }) for _, policy := range policies { name := policy.Name settings := policy.PlacementSettings @@ -266,7 +266,7 @@ func (e *ShowExec) fetchAllDBPlacements(ctx context.Context, scheduleState map[i activeRoles := e.ctx.GetSessionVars().ActiveRoles dbs := e.is.AllSchemas() - sort.Slice(dbs, func(i, j int) bool { return dbs[i].Name.O < dbs[j].Name.O }) + slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { return i.Name.O < j.Name.O }) for _, dbInfo := range dbs { if e.ctx.GetSessionVars().User != nil && checker != nil && !checker.DBIsVisible(activeRoles, dbInfo.Name.O) { @@ -290,18 +290,20 @@ func (e *ShowExec) fetchAllDBPlacements(ctx context.Context, scheduleState map[i return nil } +type tableRowSet struct { + name string + rows [][]interface{} +} + func (e *ShowExec) fetchAllTablePlacements(ctx context.Context, scheduleState map[int64]infosync.PlacementScheduleState) error { checker := privilege.GetPrivilegeManager(e.ctx) activeRoles := e.ctx.GetSessionVars().ActiveRoles dbs := e.is.AllSchemas() - sort.Slice(dbs, func(i, j int) bool { return dbs[i].Name.O < dbs[j].Name.O }) + slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { return i.Name.O < j.Name.O }) for _, dbInfo := range dbs { - tableRowSets := make([]struct { - name string - rows [][]interface{} - }, 0) + tableRowSets := make([]tableRowSet, 0) for _, tbl := range e.is.SchemaTables(dbInfo.Name) { tblInfo := tbl.Meta() @@ -357,7 +359,7 @@ func (e *ShowExec) fetchAllTablePlacements(ctx context.Context, scheduleState ma } } - sort.Slice(tableRowSets, func(i, j int) bool { return tableRowSets[i].name < 
tableRowSets[j].name }) + slices.SortFunc(tableRowSets, func(i, j tableRowSet) bool { return i.name < j.name }) for _, rowSet := range tableRowSets { for _, row := range rowSet.rows { e.appendRow(row) diff --git a/executor/show_stats.go b/executor/show_stats.go index 417e285573d75..af5c1e27ad696 100644 --- a/executor/show_stats.go +++ b/executor/show_stats.go @@ -16,7 +16,6 @@ package executor import ( "fmt" - "sort" "strings" "github.com/pingcap/errors" @@ -27,6 +26,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" "github.com/tikv/client-go/v2/oracle" + "golang.org/x/exp/slices" ) func (e *ShowExec) fetchShowStatsExtended() error { @@ -329,7 +329,7 @@ func stableColsStats(colStats map[int64]*statistics.Column) (cols []*statistics. for _, col := range colStats { cols = append(cols, col) } - sort.Slice(cols, func(i, j int) bool { return cols[i].ID < cols[j].ID }) + slices.SortFunc(cols, func(i, j *statistics.Column) bool { return i.ID < j.ID }) return } @@ -337,7 +337,7 @@ func stableIdxsStats(idxStats map[int64]*statistics.Index) (idxs []*statistics.I for _, idx := range idxStats { idxs = append(idxs, idx) } - sort.Slice(idxs, func(i, j int) bool { return idxs[i].ID < idxs[j].ID }) + slices.SortFunc(idxs, func(i, j *statistics.Index) bool { return i.ID < j.ID }) return } @@ -439,7 +439,7 @@ func (e *ShowExec) appendTableForStatsHealthy(dbName, tblName, partitionName str } func (e *ShowExec) fetchShowHistogramsInFlight() { - e.appendRow([]interface{}{statistics.HistogramNeededColumns.Length()}) + e.appendRow([]interface{}{statistics.HistogramNeededItems.Length()}) } func (e *ShowExec) fetchShowAnalyzeStatus() error { diff --git a/executor/showtest/show_test.go b/executor/showtest/show_test.go index 75e93d9655854..2994069bcb0f6 100644 --- a/executor/showtest/show_test.go +++ b/executor/showtest/show_test.go @@ -1586,10 +1586,6 @@ func TestShowVar(t *testing.T) { sessionVars := make([]string, 0, len(variable.GetSysVars())) globalVars := make([]string, 0, len(variable.GetSysVars())) for _, v := range variable.GetSysVars() { - if v.Hidden { - continue - } - if v.Scope == variable.ScopeSession { sessionVars = append(sessionVars, v.Name) } else { @@ -1614,10 +1610,6 @@ func TestShowVar(t *testing.T) { res = tk.MustQuery(showSQL) require.Len(t, res.Rows(), len(globalVars)) - // Test Hidden tx_read_ts - res = tk.MustQuery("show variables like '%tx_read_ts'") - require.Len(t, res.Rows(), 0) - // Test versions' related variables res = tk.MustQuery("show variables like 'version%'") for _, row := range res.Rows() { diff --git a/executor/simple.go b/executor/simple.go index cfede41d49057..a7794d4fb8498 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -39,7 +39,6 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" - plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" @@ -98,17 +97,17 @@ func (e *baseExecutor) getSysSession() (sessionctx.Context, error) { return restrictedCtx, nil } -func (e *baseExecutor) releaseSysSession(ctx sessionctx.Context) { - if ctx == nil { +func (e *baseExecutor) releaseSysSession(ctx context.Context, sctx sessionctx.Context) { + if sctx == nil { return } dom := domain.GetDomain(e.ctx) sysSessionPool := dom.SysSessionPool() - if _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "rollback"); err != nil { - ctx.(pools.Resource).Close() + 
if _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback"); err != nil { + sctx.(pools.Resource).Close() return } - sysSessionPool.Put(ctx.(pools.Resource)) + sysSessionPool.Put(sctx.(pools.Resource)) } // Next implements the Executor Next interface. @@ -183,9 +182,10 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -195,15 +195,15 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", u.Username, u.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } return nil @@ -233,9 +233,10 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -245,9 +246,9 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? 
AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -258,22 +259,22 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul if ok { sql.Reset() sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values(%?, %?, %?, %?);", user.Hostname, user.Username, role.Hostname, role.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } else { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return ErrRoleNotGranted.GenWithStackByArgs(role.String(), user.String()) } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return nil @@ -289,13 +290,14 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol return ErrCannotUser.GenWithStackByArgs("SET DEFAULT ROLE", user.String()) } } + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) restrictedCtx, err := e.getSysSession() if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } sql := new(strings.Builder) @@ -305,24 +307,24 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol } sql.Reset() sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } sql.Reset() sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? 
AND TO_USER=%?;", user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return nil @@ -338,18 +340,19 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + defer e.releaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -375,14 +378,14 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { return rollbackErr } return err } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } return nil @@ -639,6 +642,7 @@ func (e *SimpleExec) executeReleaseSavepoint(s *ast.ReleaseSavepointStmt) error } func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) for _, role := range s.Roles { exists, err := userExists(ctx, e.ctx, role.Username, role.Hostname) if err != nil { @@ -653,11 +657,11 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. 
- if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return errors.Trace(err) } sql := new(strings.Builder) @@ -673,7 +677,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm return errors.Trace(err) } if !exists { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", user.String()) @@ -684,8 +688,8 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE FROM_HOST=%? and FROM_USER=%? and TO_HOST=%? and TO_USER=%?`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) @@ -693,8 +697,8 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE DEFAULT_ROLE_HOST=%? and DEFAULT_ROLE_USER=%? and HOST=%? and USER=%?`, mysql.SystemDB, mysql.DefaultRoleTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) @@ -711,7 +715,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } checker := privilege.GetPrivilegeManager(e.ctx) @@ -763,6 +767,7 @@ func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error { } func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check `CREATE USER` privilege. 
if !config.GetGlobalConfig().Security.SkipGrantTable { checker := privilege.GetPrivilegeManager(e.ctx) @@ -854,15 +859,15 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return errors.Trace(err) } - _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()) if err != nil { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err @@ -876,21 +881,22 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm } sqlexec.MustFormatSQL(sql, `(%?, %?, %?)`, user.Hostname, user.Username, string(hack.String(privData))) } - _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()) if err != nil { - if _, rollbackErr := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); rollbackErr != nil { + if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr } return err } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return errors.Trace(err) } return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() } func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) if s.CurrentAuth != nil { user := e.ctx.GetSessionVars().User if user == nil { @@ -946,13 +952,13 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) // For simplicity: RESTRICTED_USER_ADMIN also counts for SYSTEM_USER here. 
if !(hasCreateUserPriv || hasSystemSchemaPriv) { - return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("CREATE USER") + return core.ErrSpecificAccessDenied.GenWithStackByArgs("CREATE USER") } if checker.RequestDynamicVerificationWithUser("SYSTEM_USER", false, spec.User) && !(hasSystemUserPriv || hasRestrictedUserPriv) { - return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("SYSTEM_USER or SUPER") + return core.ErrSpecificAccessDenied.GenWithStackByArgs("SYSTEM_USER or SUPER") } if sem.IsEnabled() && checker.RequestDynamicVerificationWithUser("RESTRICTED_USER_ADMIN", false, spec.User) && !hasRestrictedUserPriv { - return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("RESTRICTED_USER_ADMIN") + return core.ErrSpecificAccessDenied.GenWithStackByArgs("RESTRICTED_USER_ADMIN") } } @@ -1022,6 +1028,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) sessionVars := e.ctx.GetSessionVars() for i, user := range s.Users { if user.CurrentUser { @@ -1053,11 +1060,11 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) if err != nil { return err } - defer e.releaseSysSession(restrictedCtx) + defer e.releaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } @@ -1066,16 +1073,16 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) for _, role := range s.Roles { sql.Reset() sqlexec.MustFormatSQL(sql, `INSERT IGNORE INTO %n.%n (FROM_HOST, FROM_USER, TO_HOST, TO_USER) VALUES (%?,%?,%?,%?)`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } return ErrCannotUser.GenWithStackByArgs("GRANT ROLE", user.String()) } } } - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() @@ -1083,16 +1090,16 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) // Should cover same internal mysql.* tables as DROP USER, so this function is very similar func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { - + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) var failedUser string sysSession, err := e.getSysSession() - defer e.releaseSysSession(sysSession) + defer e.releaseSysSession(ctx, sysSession) if err != nil { return err } sqlExecutor := sysSession.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err } @@ -1104,7 +1111,7 @@ 
func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { if len(newUser.Hostname) > auth.HostNameMaxLength { return ErrWrongStringLength.GenWithStackByArgs(newUser.Hostname, "host name", auth.HostNameMaxLength) } - exists, err := userExistsInternal(sqlExecutor, oldUser.Username, oldUser.Hostname) + exists, err := userExistsInternal(ctx, sqlExecutor, oldUser.Username, oldUser.Hostname) if err != nil { return err } @@ -1113,7 +1120,7 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { break } - exists, err = userExistsInternal(sqlExecutor, newUser.Username, newUser.Hostname) + exists, err = userExistsInternal(ctx, sqlExecutor, newUser.Username, newUser.Hostname) if err != nil { return err } @@ -1181,11 +1188,11 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { } if failedUser == "" { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } } else { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(ctx, "rollback"); err != nil { return err } return ErrCannotUser.GenWithStackByArgs("RENAME USER", failedUser) @@ -1194,16 +1201,18 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { } func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, usernameColumn, hostColumn string, users *ast.UserToUser) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? and %n = %?;`, mysql.SystemDB, tableName, usernameColumn, users.NewUser.Username, hostColumn, strings.ToLower(users.NewUser.Hostname), usernameColumn, users.OldUser.Username, hostColumn, strings.ToLower(users.OldUser.Hostname)) - _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) return err } func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) error { + internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check privileges. // Check `CREATE USER` privilege. checker := privilege.GetPrivilegeManager(e.ctx) @@ -1226,13 +1235,13 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e hasRestrictedUserPriv := checker.RequestDynamicVerification(activeRoles, "RESTRICTED_USER_ADMIN", false) failedUsers := make([]string, 0, len(s.UserList)) sysSession, err := e.getSysSession() - defer e.releaseSysSession(sysSession) + defer e.releaseSysSession(internalCtx, sysSession) if err != nil { return err } sqlExecutor := sysSession.(sqlexec.SQLExecutor) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "begin"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err } @@ -1257,16 +1266,16 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // any user with SUPER requires a user with SUPER to be able to DROP the user. // We also allow RESTRICTED_USER_ADMIN to count for simplicity. 
if checker.RequestDynamicVerificationWithUser("SYSTEM_USER", false, user) && !(hasSystemUserPriv || hasRestrictedUserPriv) { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } - return plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("SYSTEM_USER or SUPER") + return core.ErrSpecificAccessDenied.GenWithStackByArgs("SYSTEM_USER or SUPER") } // begin a transaction to delete a user. sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.UserTable, strings.ToLower(user.Hostname), user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1274,9 +1283,9 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.global_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.GlobalPrivTable, user.Hostname, user.Username) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } continue @@ -1285,7 +1294,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.db sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.DBTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1293,7 +1302,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.tables_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.TablePrivTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1301,7 +1310,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.columns_priv sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.ColumnPrivTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1309,14 +1318,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.role_edges sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE TO_HOST = %? 
and TO_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE FROM_HOST = %? and FROM_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1324,14 +1333,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.default_roles sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE DEFAULT_ROLE_HOST = %? and DEFAULT_ROLE_USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE HOST = %? and USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1339,7 +1348,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.global_grants sql.Reset() sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, "global_grants", user.Hostname, user.Username) - if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { + if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } @@ -1358,7 +1367,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e } if len(failedUsers) == 0 { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "commit"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } if s.IsDropRole { @@ -1369,7 +1378,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e } } } else { - if _, err := sqlExecutor.ExecuteInternal(context.TODO(), "rollback"); err != nil { + if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return err } if s.IsDropRole { @@ -1382,6 +1391,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e func userExists(ctx context.Context, sctx sessionctx.Context, name string, host string) (bool, error) { exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT * FROM %n.%n WHERE User=%? 
AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) if err != nil { return false, err @@ -1390,15 +1400,15 @@ func userExists(ctx context.Context, sctx sessionctx.Context, name string, host } // use the same internal executor to read within the same transaction, otherwise same as userExists -func userExistsInternal(sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { +func userExistsInternal(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { sql := new(strings.Builder) sqlexec.MustFormatSQL(sql, `SELECT * FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) - recordSet, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) + recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err } req := recordSet.NewChunk(nil) - err = recordSet.Next(context.TODO(), req) + err = recordSet.Next(ctx, req) var rows int = 0 if err == nil { rows = req.NumRows() @@ -1420,6 +1430,7 @@ func (e *SimpleExec) userAuthPlugin(name string, host string) (string, error) { } func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) var u, h string if s.User == nil || s.User.CurrentUser { if e.ctx.GetSessionVars().User == nil { @@ -1726,7 +1737,7 @@ func (e *SimpleExec) executeAdminFlushPlanCache(s *ast.AdminStmt) error { if s.StatementScope == ast.StatementScopeGlobal { return errors.New("Do not support the 'admin flush global scope.'") } - if !plannercore.PreparedPlanCacheEnabled() { + if !core.PreparedPlanCacheEnabled() { e.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("The plan cache is disable. So there no need to flush the plan cache")) return nil } diff --git a/executor/slow_query.go b/executor/slow_query.go index 5661fc343c8c1..1708ebe7cc6f4 100755 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -23,7 +23,6 @@ import ( "path/filepath" "regexp" "runtime" - "sort" "strconv" "strings" "sync" @@ -50,6 +49,7 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/plancodec" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // ParseSlowLogBatchSize is the batch size of slow-log lines for a worker to parse, exported for testing. @@ -855,6 +855,7 @@ func (e *slowQueryRetriever) getAllFiles(ctx context.Context, sctx sessionctx.Co } if e.extractor == nil || !e.extractor.Enable { totalFileNum = 1 + //nolint: gosec file, err := os.Open(logFilePath) if err != nil { if os.IsNotExist(err) { @@ -953,8 +954,8 @@ func (e *slowQueryRetriever) getAllFiles(ctx context.Context, sctx sessionctx.Co } } // Sort by start time - sort.Slice(logFiles, func(i, j int) bool { - return logFiles[i].start.Before(logFiles[j].start) + slices.SortFunc(logFiles, func(i, j logFile) bool { + return i.start.Before(j.start) }) return logFiles, err } diff --git a/executor/sort.go b/executor/sort.go index 83b7bc59264af..49215a4d5cb0c 100644 --- a/executor/sort.go +++ b/executor/sort.go @@ -18,7 +18,6 @@ import ( "container/heap" "context" "errors" - "sort" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" @@ -29,6 +28,7 @@ import ( "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/memory" + "golang.org/x/exp/slices" ) // SortExec represents sorting executor. 
@@ -362,9 +362,9 @@ func (h *topNChunkHeap) Swap(i, j int) { } // keyColumnsLess is the less function for key columns. -func (e *TopNExec) keyColumnsLess(i, j int) bool { - rowI := e.rowChunks.GetRow(e.rowPtrs[i]) - rowJ := e.rowChunks.GetRow(e.rowPtrs[j]) +func (e *TopNExec) keyColumnsLess(i, j chunk.RowPtr) bool { + rowI := e.rowChunks.GetRow(i) + rowJ := e.rowChunks.GetRow(j) return e.lessRow(rowI, rowJ) } @@ -473,7 +473,7 @@ func (e *TopNExec) executeTopN(ctx context.Context) error { } } } - sort.Slice(e.rowPtrs, e.keyColumnsLess) + slices.SortFunc(e.rowPtrs, e.keyColumnsLess) return nil } diff --git a/executor/split.go b/executor/split.go index 52dba35747c43..3aa6bb2bda4b9 100644 --- a/executor/split.go +++ b/executor/split.go @@ -366,6 +366,7 @@ func (e *SplitTableRegionExec) splitTableRegion(ctx context.Context) error { start := time.Now() ctxWithTimeout, cancel := context.WithTimeout(ctx, e.ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() + ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) regionIDs, err := s.SplitRegions(ctxWithTimeout, e.splitKeys, true, &e.tableInfo.ID) if err != nil { diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 62255326cbce0..36f099178c8d0 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -31,20 +31,7 @@ import ( "github.com/tikv/client-go/v2/oracle" ) -func enableStaleReadCommonFailPoint(t *testing.T) func() { - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertStaleReadValuesSameWithExecuteAndBuilder", "return")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/planner/core/assertStaleReadForOptimizePreparedPlan", "return")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertNotStaleReadForExecutorGetReadTS", "return")) - return func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleReadValuesSameWithExecuteAndBuilder")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertNotStaleReadForExecutorGetReadTS")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/planner/core/assertStaleReadForOptimizePreparedPlan")) - } -} - func TestExactStalenessTransaction(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() testcases := []struct { name string preSQL string @@ -108,9 +95,6 @@ func TestExactStalenessTransaction(t *testing.T) { } func TestSelectAsOf(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -266,9 +250,6 @@ func TestSelectAsOf(t *testing.T) { } func TestStaleReadKVRequest(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -364,9 +345,6 @@ func TestStaleReadKVRequest(t *testing.T) { } func TestStalenessAndHistoryRead(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -451,9 +429,6 @@ func TestStalenessAndHistoryRead(t *testing.T) { } func TestTimeBoundedStalenessTxn(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := 
testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -550,9 +525,6 @@ func TestStalenessTransactionSchemaVer(t *testing.T) { } func TestSetTransactionReadOnlyAsOf(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - t1, err := time.Parse(types.TimeFormat, "2016-09-21 09:53:04") require.NoError(t, err) store, clean := testkit.CreateMockStore(t) @@ -620,9 +592,6 @@ func TestSetTransactionReadOnlyAsOf(t *testing.T) { } func TestValidateReadOnlyInStalenessTransaction(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - errMsg1 := ".*only support read-only statement during read-only staleness transactions.*" errMsg2 := ".*select lock hasn't been supported in stale read yet.*" testcases := []struct { @@ -802,9 +771,6 @@ func TestValidateReadOnlyInStalenessTransaction(t *testing.T) { } func TestSpecialSQLInStalenessTxn(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -860,9 +826,6 @@ func TestSpecialSQLInStalenessTxn(t *testing.T) { } func TestAsOfTimestampCompatibility(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -920,9 +883,6 @@ func TestAsOfTimestampCompatibility(t *testing.T) { } func TestSetTransactionInfoSchema(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -964,9 +924,6 @@ func TestSetTransactionInfoSchema(t *testing.T) { } func TestStaleSelect(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1051,9 +1008,6 @@ func TestStaleReadFutureTime(t *testing.T) { } func TestStaleReadPrepare(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1110,9 +1064,6 @@ func TestStaleReadPrepare(t *testing.T) { } func TestStmtCtxStaleFlag(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1208,9 +1159,6 @@ func TestStmtCtxStaleFlag(t *testing.T) { } func TestStaleSessionQuery(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1251,9 +1199,6 @@ func TestStaleSessionQuery(t *testing.T) { } func TestStaleReadCompatibility(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1300,9 +1245,6 @@ func TestStaleReadCompatibility(t *testing.T) { } func TestStaleReadNoExtraTSORequest(t *testing.T) { - disableCommonFailPoint := enableStaleReadCommonFailPoint(t) - defer disableCommonFailPoint() - store, clean := 
testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1441,3 +1383,12 @@ func TestIssue31954(t *testing.T) { tk.MustQuery("select (select v from t1 as of timestamp @a where id=1) as v"). Check(testkit.Rows("10")) } + +func TestIssue35686(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + // This query should not panic + tk.MustQuery("select * from information_schema.ddl_jobs as of timestamp now()") +} diff --git a/executor/table_reader.go b/executor/table_reader.go index 30fedbd85737d..560eb1fd2ecd8 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -16,7 +16,6 @@ package executor import ( "context" - "sort" "time" "github.com/opentracing/opentracing-go" @@ -36,6 +35,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/stringutil" "github.com/pingcap/tipb/go-tipb" + "golang.org/x/exp/slices" ) // make sure `TableReaderExecutor` implements `Executor`. @@ -79,6 +79,7 @@ type TableReaderExecutor struct { kvRanges []kv.KeyRange dagPB *tipb.DAGRequest startTS uint64 + txnScope string readReplicaScope string isStaleness bool // columns are only required by union scan and virtual column. @@ -332,6 +333,7 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [ SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). @@ -370,6 +372,7 @@ func (e *TableReaderExecutor) buildKVReqForPartitionTableScan(ctx context.Contex SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). @@ -400,6 +403,7 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
@@ -418,9 +422,9 @@ func buildVirtualColumnIndex(schema *expression.Schema, columns []*model.ColumnI virtualColumnIndex = append(virtualColumnIndex, i) } } - sort.Slice(virtualColumnIndex, func(i, j int) bool { - return plannercore.FindColumnInfoByID(columns, schema.Columns[virtualColumnIndex[i]].ID).Offset < - plannercore.FindColumnInfoByID(columns, schema.Columns[virtualColumnIndex[j]].ID).Offset + slices.SortFunc(virtualColumnIndex, func(i, j int) bool { + return plannercore.FindColumnInfoByID(columns, schema.Columns[i].ID).Offset < + plannercore.FindColumnInfoByID(columns, schema.Columns[j].ID).Offset }) return virtualColumnIndex } diff --git a/executor/trace.go b/executor/trace.go index bc842802feee6..940420c64a0c1 100644 --- a/executor/trace.go +++ b/executor/trace.go @@ -23,7 +23,6 @@ import ( "fmt" "os" "path/filepath" - "sort" "strings" "time" @@ -32,6 +31,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -42,6 +42,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" + "golang.org/x/exp/slices" "sourcegraph.com/sourcegraph/appdash" traceImpl "sourcegraph.com/sourcegraph/appdash/opentracing" ) @@ -242,6 +243,7 @@ func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) { defer func() { vars.InRestrictedSQL = origin }() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnTrace) rs, err := se.ExecuteStmt(ctx, e.stmtNode) if err != nil { var errCode uint16 @@ -308,12 +310,12 @@ func dfsTree(t *appdash.Trace, prefix string, isLast bool, chk *chunk.Chunk) { chk.AppendString(2, duration.String()) // Sort events by their start time - sort.Slice(t.Sub, func(i, j int) bool { + slices.SortFunc(t.Sub, func(i, j *appdash.Trace) bool { var istart, jstart time.Time - if ievent, err := t.Sub[i].TimespanEvent(); err == nil { + if ievent, err := i.TimespanEvent(); err == nil { istart = ievent.Start() } - if jevent, err := t.Sub[j].TimespanEvent(); err == nil { + if jevent, err := j.TimespanEvent(); err == nil { jstart = jevent.Start() } return istart.Before(jstart) @@ -360,6 +362,7 @@ func generateOptimizerTraceFile() (*os.File, string, error) { // Generate key and create zip file time := time.Now().UnixNano() b := make([]byte, 16) + //nolint: gosec _, err = rand.Read(b) if err != nil { return nil, "", errors.AddStack(err) diff --git a/expression/BUILD.bazel b/expression/BUILD.bazel index 031d7ad48efce..55e7dc328f0af 100644 --- a/expression/BUILD.bazel +++ b/expression/BUILD.bazel @@ -106,6 +106,7 @@ go_library( "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_tipb//go-tipb", "@com_github_tikv_client_go_v2//oracle", + "@org_golang_x_exp//slices", "@org_golang_x_tools//container/intsets", "@org_uber_go_zap//:zap", ], @@ -113,6 +114,7 @@ go_library( go_test( name = "expression_test", + timeout = "short", srcs = [ "bench_test.go", "builtin_arithmetic_test.go", diff --git a/expression/builtin.go b/expression/builtin.go index ebd37910dd279..772ef5c5d48f1 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -25,7 +25,6 @@ package expression import ( - "sort" "strings" "sync" @@ -42,6 +41,7 @@ import ( "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tipb/go-tipb" + "golang.org/x/exp/slices" ) // baseBuiltinFunc will be contained in every 
struct that implement builtinFunc interface. @@ -882,7 +882,7 @@ func GetBuiltinList() []string { } res = append(res, funcName) } - sort.Strings(res) + slices.Sort(res) return res } diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index 3fc5c9a58adf0..6ef85a4b9c057 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -2540,21 +2540,21 @@ func (b *builtinNullEQIntSig) evalInt(row chunk.Row) (val int64, isNull bool, er case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case isUnsigned0 && isUnsigned1 && types.CompareUint64(uint64(arg0), uint64(arg1)) == 0: res = 1 case !isUnsigned0 && !isUnsigned1 && types.CompareInt64(arg0, arg1) == 0: res = 1 case isUnsigned0 && !isUnsigned1: if arg1 < 0 { - break + return res, false, nil } if types.CompareInt64(arg0, arg1) == 0 { res = 1 } case !isUnsigned0 && isUnsigned1: if arg0 < 0 { - break + return res, false, nil } if types.CompareInt64(arg0, arg1) == 0 { res = 1 @@ -2587,7 +2587,7 @@ func (b *builtinNullEQRealSig) evalInt(row chunk.Row) (val int64, isNull bool, e case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case types.CompareFloat64(arg0, arg1) == 0: res = 1 } @@ -2618,7 +2618,7 @@ func (b *builtinNullEQDecimalSig) evalInt(row chunk.Row) (val int64, isNull bool case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case arg0.Compare(arg1) == 0: res = 1 } @@ -2649,7 +2649,7 @@ func (b *builtinNullEQStringSig) evalInt(row chunk.Row) (val int64, isNull bool, case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case types.CompareString(arg0, arg1, b.collation) == 0: res = 1 } @@ -2680,7 +2680,7 @@ func (b *builtinNullEQDurationSig) evalInt(row chunk.Row) (val int64, isNull boo case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case arg0.Compare(arg1) == 0: res = 1 } @@ -2711,7 +2711,7 @@ func (b *builtinNullEQTimeSig) evalInt(row chunk.Row) (val int64, isNull bool, e case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil case arg0.Compare(arg1) == 0: res = 1 } @@ -2742,7 +2742,7 @@ func (b *builtinNullEQJSONSig) evalInt(row chunk.Row) (val int64, isNull bool, e case isNull0 && isNull1: res = 1 case isNull0 != isNull1: - break + return res, false, nil default: cmpRes := json.CompareBinary(arg0, arg1) if cmpRes == 0 { diff --git a/expression/builtin_encryption.go b/expression/builtin_encryption.go index 2171cf92a6304..4229f03402422 100644 --- a/expression/builtin_encryption.go +++ b/expression/builtin_encryption.go @@ -581,6 +581,7 @@ func (b *builtinRandomBytesSig) evalString(row chunk.Row) (string, bool, error) return "", false, types.ErrOverflow.GenWithStackByArgs("length", "random_bytes") } buf := make([]byte, val) + //nolint: gosec if n, err := rand.Read(buf); err != nil { return "", true, err } else if int64(n) != val { diff --git a/expression/builtin_info.go b/expression/builtin_info.go index 1513b6bee5386..1e61039b5d018 100644 --- a/expression/builtin_info.go +++ b/expression/builtin_info.go @@ -21,7 +21,6 @@ package expression import ( "context" "encoding/json" - "sort" "strings" "time" @@ -36,6 +35,7 @@ import ( "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/printer" "github.com/pingcap/tipb/go-tipb" + "golang.org/x/exp/slices" ) var ( @@ -234,7 +234,7 @@ func (b *builtinCurrentRoleSig) evalString(row chunk.Row) (res string, isNull bo 
for _, r := range data.ActiveRoles { sortedRes = append(sortedRes, r.String()) } - sort.Strings(sortedRes) + slices.Sort(sortedRes) for i, r := range sortedRes { res += r if i != len(data.ActiveRoles)-1 { diff --git a/expression/builtin_info_vec.go b/expression/builtin_info_vec.go index e9384eaafc8c6..d86aa300783e7 100644 --- a/expression/builtin_info_vec.go +++ b/expression/builtin_info_vec.go @@ -15,7 +15,6 @@ package expression import ( - "sort" "strings" "github.com/pingcap/errors" @@ -24,6 +23,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/printer" + "golang.org/x/exp/slices" ) func (b *builtinDatabaseSig) vectorized() bool { @@ -145,7 +145,7 @@ func (b *builtinCurrentRoleSig) vecEvalString(input *chunk.Chunk, result *chunk. for _, r := range data.ActiveRoles { sortedRes = append(sortedRes, r.String()) } - sort.Strings(sortedRes) + slices.Sort(sortedRes) res := strings.Join(sortedRes, ",") for i := 0; i < n; i++ { result.AppendString(res) diff --git a/expression/column.go b/expression/column.go index b83e0c0203820..e37f0deb47054 100644 --- a/expression/column.go +++ b/expression/column.go @@ -16,7 +16,6 @@ package expression import ( "fmt" - "sort" "strings" "github.com/pingcap/errors" @@ -30,6 +29,7 @@ import ( "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" + "golang.org/x/exp/slices" ) // CorrelatedColumn stands for a column in a correlated sub query. @@ -690,8 +690,8 @@ func (col *Column) Repertoire() Repertoire { func SortColumns(cols []*Column) []*Column { sorted := make([]*Column, len(cols)) copy(sorted, cols) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].UniqueID < sorted[j].UniqueID + slices.SortFunc(sorted, func(i, j *Column) bool { + return i.UniqueID < j.UniqueID }) return sorted } diff --git a/expression/constant_test.go b/expression/constant_test.go index 50e5c5ce98ecc..e118fdd6a125a 100644 --- a/expression/constant_test.go +++ b/expression/constant_test.go @@ -21,14 +21,13 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/mock" + "github.com/stretchr/testify/require" ) func newColumn(id int) *Column { diff --git a/expression/explain.go b/expression/explain.go index daffbde3879c0..f4c97c74dab53 100644 --- a/expression/explain.go +++ b/expression/explain.go @@ -17,12 +17,12 @@ package expression import ( "bytes" "fmt" - "sort" "strings" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "golang.org/x/exp/slices" ) // ExplainInfo implements the Expression interface. 
@@ -149,7 +149,7 @@ func sortedExplainExpressionList(exprs []Expression, normalized bool) []byte { exprInfos = append(exprInfos, expr.ExplainInfo()) } } - sort.Strings(exprInfos) + slices.Sort(exprInfos) for i, info := range exprInfos { buffer.WriteString(info) if i+1 < len(exprInfos) { diff --git a/expression/helper_test.go b/expression/helper_test.go index beb69528b02cf..ed75ca82f2c25 100644 --- a/expression/helper_test.go +++ b/expression/helper_test.go @@ -20,8 +20,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" @@ -30,6 +28,7 @@ import ( "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/mock" + "github.com/stretchr/testify/require" ) func TestGetTimeValue(t *testing.T) { diff --git a/expression/util.go b/expression/util.go index 40d4ab3f95774..f81737e4b7369 100644 --- a/expression/util.go +++ b/expression/util.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/opcode" @@ -1366,6 +1367,7 @@ func (r *SQLDigestTextRetriever) runMockQuery(data map[string]string, inValues [ // queries information_schema.statements_summary and information_schema.statements_summary_history; otherwise, it // queries the cluster version of these two tables. func (r *SQLDigestTextRetriever) runFetchDigestQuery(ctx context.Context, sctx sessionctx.Context, queryGlobal bool, inValues []interface{}) (map[string]string, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) // If mock data is set, query the mock data instead of the real statements_summary tables. 
if !queryGlobal && r.mockLocalData != nil { return r.runMockQuery(r.mockLocalData, inValues) diff --git a/go.mod b/go.mod index 9ad9135be4085..2d67c63904fc3 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/storage v1.21.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0 - github.com/BurntSushi/toml v0.4.1 + github.com/BurntSushi/toml v1.1.0 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/Jeffail/gabs/v2 v2.5.1 github.com/Shopify/sarama v1.29.0 @@ -46,15 +46,15 @@ require ( github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167 + github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a github.com/pingcap/log v1.1.0 github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 github.com/pingcap/tidb/parser v0.0.0-20211011031125-9b13dc409c5e - github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73 + github.com/pingcap/tipb v0.0.0-20220706024432-7be3cc83a7d5 github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.32.1 - github.com/shirou/gopsutil/v3 v3.21.12 + github.com/shirou/gopsutil/v3 v3.22.4 github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/soheilhy/cmux v0.1.5 @@ -62,7 +62,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b + github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible @@ -79,15 +79,15 @@ require ( go.uber.org/goleak v1.1.12 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 - golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 + golang.org/x/exp v0.0.0-20220428152302-39d4317da171 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 - golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f + golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 - golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f + golang.org/x/tools v0.1.11 google.golang.org/api v0.69.0 google.golang.org/grpc v1.44.0 gopkg.in/yaml.v2 v2.4.0 @@ -98,10 +98,24 @@ require ( require ( github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581 github.com/charithe/durationcheck v0.0.9 + github.com/daixiang0/gci v0.3.4 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a + github.com/golangci/golangci-lint v1.46.2 + github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb + github.com/golangci/misspell v0.3.5 + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 + github.com/kisielk/errcheck v1.6.1 github.com/kyoh86/exportloopref v0.1.8 - honnef.co/go/tools v0.0.1-2020.1.4 + github.com/nishanths/predeclared v0.2.2 + github.com/tdakkota/asciicheck v0.1.1 + honnef.co/go/tools v0.3.1 +) + +require ( + 
github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect ) require ( @@ -117,7 +131,7 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cockroachdb/errors v1.8.1 // indirect + github.com/cockroachdb/errors v1.8.1 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect github.com/cockroachdb/redact v1.0.8 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect @@ -131,7 +145,6 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -179,8 +192,8 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/stathat/consistent v1.0.0 // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect @@ -203,10 +216,10 @@ require ( golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect sigs.k8s.io/yaml v1.2.0 // indirect diff --git a/go.sum b/go.sum index 2dcc2c3e4cfeb..f4e4fb8b40f4f 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0 h1:62Ew5xXg5UCGIXDOM7+y4IL5/6mQJq1nenhBCJAeGX8= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0/go.mod h1:eHWhQKXc1Gv1DvWH//UzgWjWFEo0Pp4pH2vBzjBw8Fc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet 
v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= @@ -191,8 +192,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/daixiang0/gci v0.3.4 h1:+EZ83znNs73C9ZBTM7xhNagMP6gJs5wlptiFiuce5BM= +github.com/daixiang0/gci v0.3.4/go.mod h1:pB1j339Q+2sv/EyKd4dgvGXcaBGIErim+dlhLDtqeW4= github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w= github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -251,8 +252,7 @@ github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebP github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsouza/fake-gcs-server v1.19.0 h1:XyaGOlqo+R5sjT03x2ymk0xepaQlgwhRLTT2IopW0zA= github.com/fsouza/fake-gcs-server v1.19.0/go.mod h1:JtXHY/QzHhtyIxsNfIuQ+XgHtRb5B/w8nqbL5O8zqo0= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= @@ -345,8 +345,16 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.46.2 h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo= +github.com/golangci/golangci-lint v1.46.2/go.mod h1:3DkdHnxn9eoTTrpT2gB0TEv8KSziuoqe9FitgQLHvAY= +github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc= +github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo= +github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert 
v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -446,6 +454,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= @@ -511,6 +521,9 @@ github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiD github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.1 h1:cErYo+J4SmEjdXZrVXGwLJCE2sB06s23LpkcyWNrT+s= +github.com/kisielk/errcheck v1.6.1/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -589,6 +602,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4= github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk= @@ -597,6 +612,8 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= 
+github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -636,7 +653,6 @@ github.com/pingcap/check v0.0.0-20211026125417-57bd13f7b5f0/go.mod h1:PYMCGwN0JH github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= @@ -648,8 +664,9 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20220302110454-c696585a961b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167 h1:dsMpneacHyuVslSVndgUfJKrXFNG7VPdXip2ulG6glo= -github.com/pingcap/kvproto v0.0.0-20220517085838-12e2f5a9d167/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= +github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= +github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a h1:nP2wmyw9JTRsk5rm+tZtfAso6c/1FvuaFNbXTaYz3FE= +github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= @@ -658,8 +675,8 @@ github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8= github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 h1:HYbcxtnkN3s5tqrZ/z3eJS4j3Db8wMphEm1q10lY/TM= github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4/go.mod h1:sDCsM39cGiv2vwunZkaFA917vVkqDTGSPbbV7z4Oops= -github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73 h1:L4nZwfYSrIsWPAZR8zMwHaNQJy0Rjy3Od6Smj5mlOms= -github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= +github.com/pingcap/tipb v0.0.0-20220706024432-7be3cc83a7d5 h1:XaTE4ZhQbQtQZtAVzlZh/Pf6SjFfMSTe1ia2nGcl36Y= +github.com/pingcap/tipb v0.0.0-20220706024432-7be3cc83a7d5/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= 
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -720,8 +737,9 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.21.12 h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA= github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 h1:mj/nMDAwTBiaCqMEs4cYCqF7pO6Np7vhy1D1wcQGz+E= @@ -758,25 +776,31 @@ github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4Cc github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY= github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= +github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= -github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b h1:N5ivsNkDQDgimY0ZVqMnWqXjEnxy5uFChoB4wPIKpPI= -github.com/tikv/client-go/v2 v2.0.1-0.20220613112734-be31f33ba03b/go.mod h1:KzWkFRax8foxw13dSXAQZN+dLgixwahT10ZaAK9V/pg= +github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd h1:VAyYcN1Nw7RupQszUYqOkueEVapWSxKFU7uBaYY5Dv8= +github.com/tikv/client-go/v2 
v2.0.1-0.20220627063500-947d923945fd/go.mod h1:uoZHYWKB+PsDueEnZ0EvF5zvNJPEauEWN26Tgi7qvNI= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710/go.mod h1:AtvppPwkiyUgQlR1W9qSqfTB+OsOIu19jDCOxOsPkmU= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -896,7 +920,6 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= @@ -920,7 +943,6 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSO golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -933,8 +955,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp 
v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= @@ -963,6 +985,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1134,9 +1157,10 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw= -golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1199,6 +1223,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ 
-1222,13 +1247,16 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f h1:OKYpQQVE3DKSc3r3zHVzq46vq5YH7x8xpR3/k9ixmUg= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1392,8 +1420,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1433,19 +1462,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= -modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= -modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= -modernc.org/lexer v1.0.0/go.mod 
h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/parser v1.0.0/go.mod h1:H20AntYJ2cHHL6MHthJ8LZzXCdDCHMWt1KZXtIMjejA= -modernc.org/parser v1.0.2/go.mod h1:TXNq3HABP3HMaqLK7brD1fLA/LfN0KS6JxZn71QdDqs= -modernc.org/scanner v1.0.1/go.mod h1:OIzD2ZtjYk6yTuyqZr57FmifbM9fIH74SumloSsajuE= -modernc.org/sortutil v1.0.0/go.mod h1:1QO0q8IlIlmjBIwm6t/7sof874+xCfZouyqZMLIAtxM= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/y v1.0.1/go.mod h1:Ho86I+LVHEI+LYXoUKlmOMAM1JTXOCfj8qi1T8PsClE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/infoschema/BUILD.bazel b/infoschema/BUILD.bazel index 1f78edce74f2a..1a76d99316cca 100644 --- a/infoschema/BUILD.bazel +++ b/infoschema/BUILD.bazel @@ -40,6 +40,7 @@ go_library( "//util/domainutil", "//util/execdetails", "//util/logutil", + "//util/mock", "//util/pdapi", "//util/sem", "//util/set", @@ -50,6 +51,7 @@ go_library( "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_tikv_client_go_v2//tikv", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/infoschema/builder.go b/infoschema/builder.go index bdca4f8dd058f..6799de641750b 100644 --- a/infoschema/builder.go +++ b/infoschema/builder.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type policyGetter struct { @@ -648,7 +649,9 @@ func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID i bucketIdx := tableBucketIdx(tableID) sortedTbls := b.is.sortedTablesBuckets[bucketIdx] sortedTbls = append(sortedTbls, tbl) - sort.Sort(sortedTbls) + slices.SortFunc(sortedTbls, func(i, j table.Table) bool { + return i.Meta().ID < j.Meta().ID + }) b.is.sortedTablesBuckets[bucketIdx] = sortedTbls newTbl, ok := b.is.TableByID(tableID) diff --git a/infoschema/cluster.go b/infoschema/cluster.go index d1d4f3c5a7fac..47bb7db4c3434 100644 --- a/infoschema/cluster.go +++ b/infoschema/cluster.go @@ -79,16 +79,14 @@ func isClusterTableByName(dbName, tableName string) bool { dbName = strings.ToUpper(dbName) switch dbName { case util.InformationSchemaName.O, util.PerformanceSchemaName.O: - break - default: - return false - } - tableName = strings.ToUpper(tableName) - for _, name := range memTableToClusterTables { - name = strings.ToUpper(name) - if name == tableName { - return true + tableName = strings.ToUpper(tableName) + for _, name := range memTableToClusterTables { + name = strings.ToUpper(name) + if name == tableName { + return true + } } + default: } return false } diff --git a/infoschema/info_store.go b/infoschema/info_store.go new file mode 100644 index 0000000000000..9ffd78e64fa2c --- /dev/null +++ b/infoschema/info_store.go @@ -0,0 +1,160 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package infoschema + +import ( + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/tables" +) + +// InfoStore is a simple structure that stores DBInfo and TableInfo. It's modifiable and not thread-safe. +type InfoStore struct { + lowerCaseTableNames int // same as variable lower_case_table_names + + dbs map[string]*model.DBInfo + tables map[string]map[string]*model.TableInfo +} + +// NewInfoStore creates an InfoStore. +func NewInfoStore(lowerCaseTableNames int) *InfoStore { + return &InfoStore{ + lowerCaseTableNames: lowerCaseTableNames, + dbs: map[string]*model.DBInfo{}, + tables: map[string]map[string]*model.TableInfo{}, + } +} + +func (i *InfoStore) ciStr2Key(name model.CIStr) string { + if i.lowerCaseTableNames == 0 { + return name.O + } + return name.L +} + +// SchemaByName returns the DBInfo of the given name, or nil if not found. +func (i *InfoStore) SchemaByName(name model.CIStr) *model.DBInfo { + key := i.ciStr2Key(name) + return i.dbs[key] +} + +// PutSchema puts a DBInfo, overwriting any existing one. +func (i *InfoStore) PutSchema(dbInfo *model.DBInfo) { + key := i.ciStr2Key(dbInfo.Name) + i.dbs[key] = dbInfo + if i.tables[key] == nil { + i.tables[key] = map[string]*model.TableInfo{} + } +} + +// DeleteSchema deletes the schema from the InfoStore. It returns true when the schema exists, false otherwise. +func (i *InfoStore) DeleteSchema(name model.CIStr) bool { + key := i.ciStr2Key(name) + _, ok := i.dbs[key] + if !ok { + return false + } + delete(i.dbs, key) + delete(i.tables, key) + return true +} + +// TableByName returns the TableInfo. It returns the same errors an InfoSchema would. +func (i *InfoStore) TableByName(schema, table model.CIStr) (*model.TableInfo, error) { + schemaKey := i.ciStr2Key(schema) + tables, ok := i.tables[schemaKey] + if !ok { + return nil, ErrDatabaseNotExists.GenWithStackByArgs(schema) + } + + tableKey := i.ciStr2Key(table) + tbl, ok := tables[tableKey] + if !ok { + return nil, ErrTableNotExists.GenWithStackByArgs(schema, table) + } + return tbl, nil +} + +// PutTable puts a TableInfo, overwriting any existing one. If the schema doesn't exist, it returns ErrDatabaseNotExists. +func (i *InfoStore) PutTable(schemaName model.CIStr, tblInfo *model.TableInfo) error { + schemaKey := i.ciStr2Key(schemaName) + tables, ok := i.tables[schemaKey] + if !ok { + return ErrDatabaseNotExists.GenWithStackByArgs(schemaName) + } + tableKey := i.ciStr2Key(tblInfo.Name) + tables[tableKey] = tblInfo + return nil +} + +// DeleteTable deletes the TableInfo; it returns ErrDatabaseNotExists or ErrTableNotExists when the schema or table does +// not exist.
+func (i *InfoStore) DeleteTable(schema, table model.CIStr) error { + schemaKey := i.ciStr2Key(schema) + tables, ok := i.tables[schemaKey] + if !ok { + return ErrDatabaseNotExists.GenWithStackByArgs(schema) + } + + tableKey := i.ciStr2Key(table) + _, ok = tables[tableKey] + if !ok { + return ErrTableNotExists.GenWithStackByArgs(schema, table) + } + delete(tables, tableKey) + return nil +} + +// InfoStoreAdaptor converts an InfoStore to an InfoSchema. It implements only the part of the InfoSchema interface that is +// used by the DDL interface. +// nolint:unused +type InfoStoreAdaptor struct { + InfoSchema + inner *InfoStore +} + +// SchemaByName implements the InfoSchema interface. +// nolint:unused +func (i InfoStoreAdaptor) SchemaByName(schema model.CIStr) (*model.DBInfo, bool) { + dbInfo := i.inner.SchemaByName(schema) + return dbInfo, dbInfo != nil +} + +// TableExists implements the InfoSchema interface. +// nolint:unused +func (i InfoStoreAdaptor) TableExists(schema, table model.CIStr) bool { + tableInfo, _ := i.inner.TableByName(schema, table) + return tableInfo != nil +} + +// TableIsView implements the InfoSchema interface. +// nolint:unused +func (i InfoStoreAdaptor) TableIsView(schema, table model.CIStr) bool { + tableInfo, _ := i.inner.TableByName(schema, table) + if tableInfo == nil { + return false + } + return tableInfo.IsView() +} + +// TableByName implements the InfoSchema interface. +// nolint:unused +func (i InfoStoreAdaptor) TableByName(schema, table model.CIStr) (t table.Table, err error) { + tableInfo, err := i.inner.TableByName(schema, table) + if err != nil { + return nil, err + } + return tables.MockTableFromMeta(tableInfo), nil +} diff --git a/infoschema/info_store_test.go b/infoschema/info_store_test.go new file mode 100644 index 0000000000000..07ee9d87d9de5 --- /dev/null +++ b/infoschema/info_store_test.go @@ -0,0 +1,115 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package infoschema + +import ( + "testing" + + "github.com/pingcap/tidb/parser/model" + "github.com/stretchr/testify/require" +) + +func TestInfoStoreLowerCaseTableNames(t *testing.T) { + dbName := model.NewCIStr("DBName") + lowerDBName := model.NewCIStr("dbname") + tableName := model.NewCIStr("TableName") + lowerTableName := model.NewCIStr("tablename") + dbInfo := &model.DBInfo{Name: dbName} + tableInfo := &model.TableInfo{Name: tableName} + + // case-sensitive + + is := NewInfoStore(0) + is.PutSchema(dbInfo) + got := is.SchemaByName(dbName) + require.NotNil(t, got) + got = is.SchemaByName(lowerDBName) + require.Nil(t, got) + + err := is.PutTable(lowerDBName, tableInfo) + require.True(t, ErrDatabaseNotExists.Equal(err)) + err = is.PutTable(dbName, tableInfo) + require.NoError(t, err) + got2, err := is.TableByName(dbName, tableName) + require.NoError(t, err) + require.NotNil(t, got2) + got2, err = is.TableByName(lowerTableName, tableName) + require.True(t, ErrDatabaseNotExists.Equal(err)) + require.Nil(t, got2) + got2, err = is.TableByName(dbName, lowerTableName) + require.True(t, ErrTableNotExists.Equal(err)) + require.Nil(t, got2) + + // compare-insensitive + + is = NewInfoStore(2) + is.PutSchema(dbInfo) + got = is.SchemaByName(dbName) + require.NotNil(t, got) + got = is.SchemaByName(lowerDBName) + require.NotNil(t, got) + require.Equal(t, dbName, got.Name) + + err = is.PutTable(lowerDBName, tableInfo) + require.NoError(t, err) + got2, err = is.TableByName(dbName, tableName) + require.NoError(t, err) + require.NotNil(t, got2) + got2, err = is.TableByName(dbName, lowerTableName) + require.NoError(t, err) + require.NotNil(t, got2) + require.Equal(t, tableName, got2.Name) +} + +func TestInfoStoreDeleteTables(t *testing.T) { + is := NewInfoStore(0) + dbName1 := model.NewCIStr("DBName1") + dbName2 := model.NewCIStr("DBName2") + tableName1 := model.NewCIStr("TableName1") + tableName2 := model.NewCIStr("TableName2") + dbInfo1 := &model.DBInfo{Name: dbName1} + dbInfo2 := &model.DBInfo{Name: dbName2} + tableInfo1 := &model.TableInfo{Name: tableName1} + tableInfo2 := &model.TableInfo{Name: tableName2} + + is.PutSchema(dbInfo1) + err := is.PutTable(dbName1, tableInfo1) + require.NoError(t, err) + err = is.PutTable(dbName1, tableInfo2) + require.NoError(t, err) + + // db2 not created + ok := is.DeleteSchema(dbName2) + require.False(t, ok) + err = is.PutTable(dbName2, tableInfo1) + require.True(t, ErrDatabaseNotExists.Equal(err)) + err = is.DeleteTable(dbName2, tableName1) + require.True(t, ErrDatabaseNotExists.Equal(err)) + + is.PutSchema(dbInfo2) + err = is.PutTable(dbName2, tableInfo1) + require.NoError(t, err) + + err = is.DeleteTable(dbName2, tableName2) + require.True(t, ErrTableNotExists.Equal(err)) + err = is.DeleteTable(dbName2, tableName1) + require.NoError(t, err) + + // delete db will remove its tables + ok = is.DeleteSchema(dbName1) + require.True(t, ok) + _, err = is.TableByName(dbName1, tableName1) + require.True(t, ErrDatabaseNotExists.Equal(err)) +} diff --git a/infoschema/infoschema.go b/infoschema/infoschema.go index 3200166d21af0..2510e6e45b56c 100644 --- a/infoschema/infoschema.go +++ b/infoschema/infoschema.go @@ -23,8 +23,11 @@ import ( "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/mock" + "golang.org/x/exp/slices" ) // InfoSchema is the interface used to retrieve the schema 
information. @@ -127,7 +130,9 @@ func MockInfoSchema(tbList []*model.TableInfo) InfoSchema { result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl) } for i := range result.sortedTablesBuckets { - sort.Sort(result.sortedTablesBuckets[i]) + slices.SortFunc(result.sortedTablesBuckets[i], func(i, j table.Table) bool { + return i.Meta().ID < j.Meta().ID + }) } return result } @@ -152,7 +157,9 @@ func MockInfoSchemaWithSchemaVer(tbList []*model.TableInfo, schemaVer int64) Inf result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl) } for i := range result.sortedTablesBuckets { - sort.Sort(result.sortedTablesBuckets[i]) + slices.SortFunc(result.sortedTablesBuckets[i], func(i, j table.Table) bool { + return i.Meta().ID < j.Meta().ID + }) } result.schemaMetaVersion = schemaVer return result @@ -353,6 +360,9 @@ func init() { util.GetSequenceByName = func(is interface{}, schema, sequence model.CIStr) (util.SequenceTable, error) { return GetSequenceByName(is.(InfoSchema), schema, sequence) } + mock.MockInfoschema = func(tbList []*model.TableInfo) sessionctx.InfoschemaMetaVersion { + return MockInfoSchema(tbList) + } } // HasAutoIncrementColumn checks whether the table has auto_increment columns, if so, return true and the column name. diff --git a/infoschema/infoschema_test.go b/infoschema/infoschema_test.go index e414f97d02906..7f1f36b030b29 100644 --- a/infoschema/infoschema_test.go +++ b/infoschema/infoschema_test.go @@ -102,7 +102,8 @@ func TestBasic(t *testing.T) { } dbInfos := []*model.DBInfo{dbInfo} - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err = kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { err := meta.NewMeta(txn).CreateDatabase(dbInfo) require.NoError(t, err) return errors.Trace(err) @@ -193,7 +194,7 @@ func TestBasic(t *testing.T) { require.NoError(t, err) require.NotNil(t, tb) - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { err := meta.NewMeta(txn).CreateTableOrView(dbID, tblInfo) require.NoError(t, err) return errors.Trace(err) @@ -305,7 +306,8 @@ func TestInfoTables(t *testing.T) { func genGlobalID(store kv.Storage) (int64, error) { var globalID int64 - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { var err error globalID, err = meta.NewMeta(txn).GenGlobalID() return errors.Trace(err) @@ -356,7 +358,8 @@ func TestBuildBundle(t *testing.T) { var tb1Bundle, p1Bundle *placement.Bundle - require.NoError(t, kv.RunInNewTxn(context.TODO(), store, false, func(ctx context.Context, txn kv.Transaction) (err error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) + require.NoError(t, kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) (err error) { m := meta.NewMeta(txn) tb1Bundle, err = placement.NewTableBundle(m, tbl1.Meta()) require.NoError(t, err) diff --git a/infoschema/metrics_schema.go b/infoschema/metrics_schema.go index 9ffe6a5a8e8a2..8efd1273d5d1e 100644 --- a/infoschema/metrics_schema.go +++ 
b/infoschema/metrics_schema.go @@ -17,7 +17,6 @@ package infoschema import ( "bytes" "fmt" - "sort" "strconv" "strings" @@ -29,6 +28,7 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/set" + "golang.org/x/exp/slices" ) const ( @@ -135,7 +135,7 @@ func GenLabelConditionValues(values set.StringSet) string { for k := range values { vs = append(vs, k) } - sort.Strings(vs) + slices.Sort(vs) return strings.Join(vs, "|") } diff --git a/infoschema/perfschema/BUILD.bazel b/infoschema/perfschema/BUILD.bazel index 92930a65eaa80..c577e67525808 100644 --- a/infoschema/perfschema/BUILD.bazel +++ b/infoschema/perfschema/BUILD.bazel @@ -28,6 +28,7 @@ go_library( "//util/profile", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@org_golang_x_exp//slices", ], ) diff --git a/infoschema/perfschema/tables.go b/infoschema/perfschema/tables.go index c0006f9f7413a..149336c3bf87d 100644 --- a/infoschema/perfschema/tables.go +++ b/infoschema/perfschema/tables.go @@ -17,7 +17,6 @@ package perfschema import ( "fmt" "net/http" - "sort" "strings" "sync" "time" @@ -35,6 +34,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/profile" + "golang.org/x/exp/slices" ) const ( @@ -390,7 +390,7 @@ func dataForRemoteProfile(ctx sessionctx.Context, nodeType, uri string, isGorout } results = append(results, result) } - sort.Slice(results, func(i, j int) bool { return results[i].addr < results[j].addr }) + slices.SortFunc(results, func(i, j result) bool { return i.addr < j.addr }) var finalRows [][]types.Datum for _, result := range results { addr := types.NewStringDatum(result.addr) diff --git a/infoschema/tables.go b/infoschema/tables.go index 622eb1ef9452c..9f6a21f3727b6 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -20,26 +20,21 @@ import ( "fmt" "net" "net/http" - "sort" "strconv" "strings" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/parser/charset" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/util/logutil" - "github.com/pingcap/tidb/util/stmtsummary" - "go.uber.org/zap" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -48,8 +43,12 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/deadlockhistory" "github.com/pingcap/tidb/util/execdetails" + "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/pdapi" + "github.com/pingcap/tidb/util/stmtsummary" "github.com/tikv/client-go/v2/tikv" + "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -1910,25 +1909,12 @@ type infoschemaTable struct { tp table.Type } -// SchemasSorter implements the sort.Interface interface, sorts DBInfo by name. 
-type SchemasSorter []*model.DBInfo - -func (s SchemasSorter) Len() int { - return len(s) -} - -func (s SchemasSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s SchemasSorter) Less(i, j int) bool { - return s[i].Name.L < s[j].Name.L -} - func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) (fullRows [][]types.Datum, err error) { is := ctx.GetInfoSchema().(InfoSchema) dbs := is.AllSchemas() - sort.Sort(SchemasSorter(dbs)) + slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { + return i.Name.L < j.Name.L + }) switch it.meta.Name.O { case tableFiles: case tablePlugins, tableTriggers: diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index cf9075ee21f29..c7978cbcf4cd5 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1456,7 +1456,7 @@ func TestTiDBTrx(t *testing.T) { StartTS: 425070846483628033, CurrentSQLDigest: "", AllSQLDigests: []string{"sql1", "sql2", digest.String()}, - State: txninfo.TxnLockWaiting, + State: txninfo.TxnLockAcquiring, ConnectionID: 10, Username: "user1", CurrentDB: "db1", diff --git a/kv/BUILD.bazel b/kv/BUILD.bazel index fed476f803dfc..835cd827b6bfa 100644 --- a/kv/BUILD.bazel +++ b/kv/BUILD.bazel @@ -46,6 +46,7 @@ go_library( "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", + "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", "@org_uber_go_zap//:zap", ], diff --git a/kv/kv.go b/kv/kv.go index 486b93007217d..d65d6498c12d1 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -31,6 +31,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/tikvrpc" + "github.com/tikv/client-go/v2/util" pd "github.com/tikv/pd/client" ) @@ -227,16 +228,16 @@ type Transaction interface { // If a key doesn't exist, there shouldn't be any corresponding entry in the result map. BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) IsPessimistic() bool - // CacheIndexName caches the index name. + // CacheTableInfo caches the table information. // PresumeKeyNotExists will use this to help decode error message. CacheTableInfo(id int64, info *model.TableInfo) - // GetIndexName returns the cached index name. + // GetTableInfo returns the cached table information. // If there is no such index already inserted through CacheIndexName, it will return UNKNOWN. GetTableInfo(id int64) *model.TableInfo - // set allowed options of current operation in each TiKV disk usage level. + // SetDiskFullOpt sets the allowed options of the current operation in each TiKV disk usage level. SetDiskFullOpt(level kvrpcpb.DiskFullOpt) - // clear allowed flag + // ClearDiskFullOpt clears the allowed flag. ClearDiskFullOpt() // GetMemDBCheckpoint gets the transaction's memDB checkpoint. @@ -355,6 +356,8 @@ type Request struct { TaskID uint64 // TiDBServerID is the specified TiDB serverID to execute request. `0` means all TiDB instances. TiDBServerID uint64 + // TxnScope is the scope of the txn + TxnScope string // ReadReplicaScope is the scope of the read replica. ReadReplicaScope string // IsStaleness indicates whether the request read staleness data @@ -365,6 +368,8 @@ type Request struct { ResourceGroupTagger tikvrpc.ResourceGroupTagger // Paging indicates whether the request is a paging request. Paging bool + // RequestSource indicates the source scope (internal or external) and type of the request. + RequestSource util.RequestSource } // PartitionIDAndRanges used by PartitionTableScan in tiflash.
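The kv/option.go and kv/txn.go hunks below introduce the request-source plumbing: internal callers are expected to tag their context with an internal source type before starting a transaction, and RunInNewTxn then copies that source into the transaction options (otherwise it logs a warning, or panics under `go test`). The following is a minimal usage sketch, not part of the patch; the `store` argument and the choice of kv.InternalTxnMeta are assumptions for illustration only, mirroring the pattern applied to meta/autoid and the tests later in this diff.

package example

import (
	"context"

	"github.com/pingcap/tidb/kv"
)

// runMetaTxn illustrates the calling convention this patch establishes: attach an
// internal RequestSource to the context, then run the internal transaction.
func runMetaTxn(store kv.Storage) error {
	// WithInternalSourceType puts an internal RequestSource (here with the "meta"
	// source type) into the context; RunInNewTxn propagates it to the transaction
	// options via setRequestSourceForInnerTxn.
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta)
	return kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error {
		// Work done with txn here carries the internal source labels.
		return nil
	})
}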
diff --git a/kv/option.go b/kv/option.go index 56a2e58e9196e..295cf60ee0c11 100644 --- a/kv/option.go +++ b/kv/option.go @@ -14,6 +14,10 @@ package kv +import ( + "github.com/tikv/client-go/v2/util" +) + // Transaction options const ( // BinlogInfo contains the binlog data and client. @@ -81,6 +85,10 @@ const ( TableToColumnMaps // AssertionLevel controls how strict the assertions on data during transactions should be. AssertionLevel + // RequestSourceInternal sets the request source scope of the transaction. + RequestSourceInternal + // RequestSourceType sets the request source type of the current statement. + RequestSourceType ) // ReplicaReadType is the type of replica to read data from @@ -106,3 +114,49 @@ func (r ReplicaReadType) IsFollowerRead() bool { func (r ReplicaReadType) IsClosestRead() bool { return r == ReplicaReadClosest } + +// RequestSourceKey is used as the key of the request source type in the context. +var RequestSourceKey = util.RequestSourceKey + +// RequestSource is the scope and type of the request, passed through the Go context. +type RequestSource = util.RequestSource + +// WithInternalSourceType creates a context with an internal source. +var WithInternalSourceType = util.WithInternalSourceType + +const ( + // InternalTxnOthers is the type of requests that consume low resources. + // This reduces the size of metrics. + InternalTxnOthers = util.InternalTxnOthers + // InternalTxnGC is the type of GC txn. + InternalTxnGC = util.InternalTxnGC + // InternalTxnBootstrap is the type of TiDB bootstrap txns. + InternalTxnBootstrap = InternalTxnOthers + // InternalTxnMeta is the type of the miscellaneous meta usage. + InternalTxnMeta = util.InternalTxnMeta + // InternalTxnDDL is the type of inner txns in the DDL module. + InternalTxnDDL = "ddl" + // InternalTxnBackfillDDLPrefix is the prefix of the types of DDL operations that need backfilling. + InternalTxnBackfillDDLPrefix = "ddl_" + // InternalTxnCacheTable is the type of cache table usage. + InternalTxnCacheTable = InternalTxnOthers + // InternalTxnStats is the type of statistics txn. + InternalTxnStats = "stats" + // InternalTxnBindInfo is the type of bind info txn. + InternalTxnBindInfo = InternalTxnOthers + // InternalTxnSysVar is the type of sys var txn. + InternalTxnSysVar = InternalTxnOthers + // InternalTxnTelemetry is the type of telemetry. + InternalTxnTelemetry = InternalTxnOthers + // InternalTxnAdmin is the type of admin operations. + InternalTxnAdmin = "admin" + // InternalTxnPrivilege is the type of privilege txn. + InternalTxnPrivilege = InternalTxnOthers + // InternalTxnTools is the type of tools usage of TiDB. + // Do not classify different tools for now. + InternalTxnTools = "tools" + // InternalTxnBR is the type of BR usage. + InternalTxnBR = InternalTxnTools + // InternalTxnTrace handles the trace statement. + InternalTxnTrace = "Trace" +) diff --git a/kv/txn.go b/kv/txn.go index dd51b7a2e56fe..dfad0ee2efed0 100644 --- a/kv/txn.go +++ b/kv/txn.go @@ -17,6 +17,7 @@ package kv import ( "context" "errors" + "flag" "fmt" "math" "math/rand" @@ -99,7 +100,7 @@ func PrintLongTimeInternalTxn(now time.Time, startTS uint64, runByFunction bool) } } -// RunInNewTxn will run the f in a new transaction environment. +// RunInNewTxn will run f in a new transaction environment; it should be used by internal transactions only.
func RunInNewTxn(ctx context.Context, store Storage, retryable bool, f func(ctx context.Context, txn Transaction) error) error { var ( err error @@ -117,6 +118,7 @@ func RunInNewTxn(ctx context.Context, store Storage, retryable bool, f func(ctx logutil.BgLogger().Error("RunInNewTxn", zap.Error(err)) return err } + setRequestSourceForInnerTxn(ctx, txn) // originalTxnTS is used to trace the original transaction when the function is retryable. if i == 0 { @@ -188,3 +190,24 @@ func BackOff(attempts uint) int { time.Sleep(sleep) return int(sleep) } + +func setRequestSourceForInnerTxn(ctx context.Context, txn Transaction) { + if source := ctx.Value(RequestSourceKey); source != nil { + requestSource := source.(RequestSource) + if !requestSource.RequestSourceInternal { + logutil.Logger(ctx).Warn("`RunInNewTxn` should be used by inner txn only") + } + txn.SetOption(RequestSourceInternal, requestSource.RequestSourceInternal) + txn.SetOption(RequestSourceType, requestSource.RequestSourceType) + } else { + // panic in test mode in case there are requests without source in the future. + // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") + } else { + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, " + + "the `RequestSourceTypeKey` is missing in the context") + } + } +} diff --git a/kv/txn_test.go b/kv/txn_test.go index 22fc61a482042..9e01a6fbd92a7 100644 --- a/kv/txn_test.go +++ b/kv/txn_test.go @@ -42,17 +42,18 @@ func TestRetryExceedCountError(t *testing.T) { }(maxRetryCnt) maxRetryCnt = 5 - err := RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + ctx := WithInternalSourceType(context.Background(), InternalTxnOthers) + err := RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return nil }) assert.NotNil(t, err) - err = RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return ErrTxnRetryable }) assert.NotNil(t, err) - err = RunInNewTxn(context.Background(), &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, &mockStorage{}, true, func(ctx context.Context, txn Transaction) error { return errors.New("do not retry") }) assert.NotNil(t, err) @@ -62,7 +63,7 @@ func TestRetryExceedCountError(t *testing.T) { cfg.SetGetError(err1) cfg.SetCommitError(err1) storage := NewInjectedStore(newMockStorage(), &cfg) - err = RunInNewTxn(context.Background(), storage, true, func(ctx context.Context, txn Transaction) error { + err = RunInNewTxn(ctx, storage, true, func(ctx context.Context, txn Transaction) error { return nil }) assert.NotNil(t, err) diff --git a/meta/BUILD.bazel b/meta/BUILD.bazel index 2138e3ef080d8..a266aa48d2494 100644 --- a/meta/BUILD.bazel +++ b/meta/BUILD.bazel @@ -32,6 +32,7 @@ go_test( embed = [":meta"], flaky = True, deps = [ + "//ddl", "//kv", "//parser/model", "//store/mockstore", diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 5d8745cf78697..7082ea184b476 100644 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -234,7 +234,8 @@ func (alloc *allocator) End() int64 { func (alloc *allocator) NextGlobalAutoID() (int64, error) { var autoID int64 startTime := 
time.Now() - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { var err1 error autoID, err1 = alloc.getIDAccessor(txn).Get() if err1 != nil { @@ -271,6 +272,7 @@ func (alloc *allocator) rebase4Unsigned(ctx context.Context, requiredBase uint64 } var newBase, newEnd uint64 startTime := time.Now() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if allocatorStats != nil { txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats) @@ -330,6 +332,7 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a } var newBase, newEnd int64 startTime := time.Now() + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if allocatorStats != nil { txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats) @@ -370,7 +373,8 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a func (alloc *allocator) rebase4Sequence(requiredBase int64) (int64, bool, error) { startTime := time.Now() alreadySatisfied := false - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { acc := meta.NewMeta(txn).GetAutoIDAccessors(alloc.dbID, alloc.tbID) currentEnd, err := acc.SequenceValue().Get() if err != nil { @@ -427,7 +431,8 @@ func (alloc *allocator) ForceRebase(requiredBase int64) error { alloc.mu.Lock() defer alloc.mu.Unlock() startTime := time.Now() - err := kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { idAcc := alloc.getIDAccessor(txn) currentEnd, err1 := idAcc.Get() if err1 != nil { @@ -758,6 +763,7 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o }() } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("alloc.alloc4Signed", opentracing.ChildOf(span.Context())) @@ -847,6 +853,7 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, }() } + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("alloc.alloc4Unsigned", opentracing.ChildOf(span.Context())) @@ -931,7 +938,8 @@ func (alloc *allocator) alloc4Sequence() (min int64, max int64, round int64, err var newBase, newEnd int64 startTime := time.Now() - err = kv.RunInNewTxn(context.Background(), alloc.store, true, func(ctx context.Context, txn kv.Transaction) 
error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error { acc := meta.NewMeta(txn).GetAutoIDAccessors(alloc.dbID, alloc.tbID) var ( err1 error diff --git a/meta/autoid/autoid_test.go b/meta/autoid/autoid_test.go index e529764918eff..0b8cd60257cf4 100644 --- a/meta/autoid/autoid_test.go +++ b/meta/autoid/autoid_test.go @@ -47,7 +47,8 @@ func TestSignedAutoid(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -69,7 +70,6 @@ func TestSignedAutoid(t *testing.T) { alloc := autoid.NewAllocator(store, 1, 1, false, autoid.RowIDAllocType) require.NotNil(t, alloc) - ctx := context.Background() globalAutoID, err := alloc.NextGlobalAutoID() require.NoError(t, err) require.Equal(t, int64(1), globalAutoID) @@ -252,7 +252,8 @@ func TestUnsignedAutoid(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -273,7 +274,6 @@ func TestUnsignedAutoid(t *testing.T) { alloc := autoid.NewAllocator(store, 1, 1, true, autoid.RowIDAllocType) require.NotNil(t, alloc) - ctx := context.Background() globalAutoID, err := alloc.NextGlobalAutoID() require.NoError(t, err) require.Equal(t, int64(1), globalAutoID) @@ -416,7 +416,8 @@ func TestConcurrentAlloc(t *testing.T) { dbID := int64(2) tblID := int64(100) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -501,7 +502,8 @@ func TestRollbackAlloc(t *testing.T) { }() dbID := int64(1) tblID := int64(2) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -511,7 +513,6 @@ func TestRollbackAlloc(t *testing.T) { }) require.NoError(t, err) - ctx := context.Background() injectConf := new(kv.InjectionConfig) injectConf.SetCommitError(errors.New("injected")) injectedStore := kv.NewInjectedStore(store, injectConf) @@ -551,7 +552,8 @@ func TestAllocComputationIssue(t *testing.T) { require.NoError(t, err) }() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := 
kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -582,7 +584,6 @@ func TestAllocComputationIssue(t *testing.T) { // Simulate the rest cache is not enough for next batch, assuming 10 & 13, batch size = 4. autoid.TestModifyBaseAndEndInjection(signedAlloc1, 4, 6) - ctx := context.Background() // Here will recompute the new allocator batch size base on new base = 10, which will get 6. min, max, err := unsignedAlloc1.Alloc(ctx, 2, 3, 1) require.NoError(t, err) diff --git a/meta/autoid/bench_test.go b/meta/autoid/bench_test.go index e10ce091e5c84..d8b489060875d 100644 --- a/meta/autoid/bench_test.go +++ b/meta/autoid/bench_test.go @@ -41,7 +41,8 @@ func BenchmarkAllocator_Alloc(b *testing.B) { }() dbID := int64(1) tblID := int64(2) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: dbID, Name: model.NewCIStr("a")}) if err != nil { @@ -56,7 +57,6 @@ func BenchmarkAllocator_Alloc(b *testing.B) { if err != nil { return } - ctx := context.Background() alloc := autoid.NewAllocator(store, 1, 2, false, autoid.RowIDAllocType) b.StartTimer() for i := 0; i < b.N; i++ { @@ -81,7 +81,8 @@ func BenchmarkAllocator_SequenceAlloc(b *testing.B) { }() var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) if err != nil { diff --git a/meta/autoid/seq_autoid_test.go b/meta/autoid/seq_autoid_test.go index 80568edd1c8ee..761f3955cb65d 100644 --- a/meta/autoid/seq_autoid_test.go +++ b/meta/autoid/seq_autoid_test.go @@ -40,7 +40,8 @@ func TestSequenceAutoid(t *testing.T) { var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err = m.CreateDatabase(&model.DBInfo{ID: 1, Name: model.NewCIStr("a")}) require.NoError(t, err) @@ -164,7 +165,8 @@ func TestConcurrentAllocSequence(t *testing.T) { var seq *model.SequenceInfo var sequenceBase int64 - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) err1 := m.CreateDatabase(&model.DBInfo{ID: 2, Name: model.NewCIStr("a")}) require.NoError(t, err1) diff --git a/meta/meta.go b/meta/meta.go index f8bf8e6eafd19..0ee38539c85b5 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "math" - "sort" 
"strconv" "strings" "sync" @@ -137,6 +136,8 @@ func NewMeta(txn kv.Transaction, jobListKeys ...JobListKeyType) *Meta { // NewSnapshotMeta creates a Meta with snapshot. func NewSnapshotMeta(snapshot kv.Snapshot) *Meta { + snapshot.SetOption(kv.RequestSourceInternal, true) + snapshot.SetOption(kv.RequestSourceType, kv.InternalTxnMeta) t := structure.NewStructure(snapshot, nil, mMetaPrefix) return &Meta{txn: t} } @@ -1014,36 +1015,11 @@ func (m *Meta) GetHistoryDDLJob(id int64) (*model.Job, error) { return job, errors.Trace(err) } -// GetAllHistoryDDLJobs gets all history DDL jobs. -func (m *Meta) GetAllHistoryDDLJobs() ([]*model.Job, error) { - pairs, err := m.txn.HGetAll(mDDLJobHistoryKey) - if err != nil { - return nil, errors.Trace(err) - } - jobs, err := decodeJob(pairs) - if err != nil { - return nil, errors.Trace(err) - } - // sort job. - sorter := &jobsSorter{jobs: jobs} - sort.Sort(sorter) - return jobs, nil -} - // GetHistoryDDLCount the count of all history DDL jobs. func (m *Meta) GetHistoryDDLCount() (uint64, error) { return m.txn.HGetLen(mDDLJobHistoryKey) } -// GetLastNHistoryDDLJobs gets latest N history ddl jobs. -func (m *Meta) GetLastNHistoryDDLJobs(num int) ([]*model.Job, error) { - pairs, err := m.txn.HGetLastN(mDDLJobHistoryKey, num) - if err != nil { - return nil, errors.Trace(err) - } - return decodeJob(pairs) -} - // LastJobIterator is the iterator for gets latest history. type LastJobIterator interface { GetLastJobs(num int, jobs []*model.Job) ([]*model.Job, error) @@ -1087,36 +1063,6 @@ func (i *HLastJobIterator) GetLastJobs(num int, jobs []*model.Job) ([]*model.Job return jobs, nil } -func decodeJob(jobPairs []structure.HashPair) ([]*model.Job, error) { - jobs := make([]*model.Job, 0, len(jobPairs)) - for _, pair := range jobPairs { - job := &model.Job{} - err := job.Decode(pair.Value) - if err != nil { - return nil, errors.Trace(err) - } - jobs = append(jobs, job) - } - return jobs, nil -} - -// jobsSorter implements the sort.Interface interface. -type jobsSorter struct { - jobs []*model.Job -} - -func (s *jobsSorter) Swap(i, j int) { - s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] -} - -func (s *jobsSorter) Len() int { - return len(s.jobs) -} - -func (s *jobsSorter) Less(i, j int) bool { - return s.jobs[i].ID < s.jobs[j].ID -} - // GetBootstrapVersion returns the version of the server which bootstrap the store. // If the store is not bootstraped, the version will be zero. func (m *Meta) GetBootstrapVersion() (int64, error) { diff --git a/meta/meta_test.go b/meta/meta_test.go index 024e774615b1d..7174eae38c66c 100644 --- a/meta/meta_test.go +++ b/meta/meta_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -564,7 +565,7 @@ func TestDDL(t *testing.T) { historyJob2.Args = append(job.Args, arg) err = m.AddHistoryDDLJob(historyJob2, false) require.NoError(t, err) - all, err := m.GetAllHistoryDDLJobs() + all, err := ddl.GetAllHistoryDDLJobs(m) require.NoError(t, err) var lastID int64 for _, job := range all { @@ -581,7 +582,7 @@ func TestDDL(t *testing.T) { } // Test for get last N history ddl jobs. 
- historyJobs, err := m.GetLastNHistoryDDLJobs(2) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, 2) require.NoError(t, err) require.Len(t, historyJobs, 2) require.Equal(t, int64(1234), historyJobs[0].ID) diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 30d2c788dc0e1..5fe75a524d0c6 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -5689,6 +5689,648 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "description": "How much time transactions spend on each state", + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 60 + }, + "id": 252, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (le, type))", + "legendFormat": "{{type}}-99", + "interval": "", + "exemplar": true, + "hide": false, + "refId": "A" + }, + { + "expr": "histogram_quantile(0.9, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (le, type))", + "legendFormat": "{{type}}-90", + "interval": "", + "exemplar": true, + "refId": "B", + "hide": false + }, + { + "expr": "histogram_quantile(0.8, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (le, type))", + "legendFormat": "{{type}}-80", + "interval": "", + "exemplar": true, + "refId": "C", + "hide": false + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction execution states duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:231", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "bars": false, + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "percentage": false, + "points": false, + "stack": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null + }, + { + "aliasColors": {}, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "description": "How much time transactions spend on each state after it acquire at least one lock", + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 60 + }, + "id": 253, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "targets": [ 
+ { + "expr": "histogram_quantile(0.99, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", has_lock=\"true\"}[1m])) by (le, type))", + "legendFormat": "{{type}}-99", + "interval": "", + "exemplar": true, + "hide": false, + "refId": "A" + }, + { + "exemplar": true, + "expr": "histogram_quantile(0.90, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", has_lock=\"true\"}[1m])) by (le, type))", + "hide": false, + "interval": "", + "legendFormat": "{{type}}-90", + "refId": "B" + }, + { + "exemplar": true, + "expr": "histogram_quantile(0.80, sum(rate(tidb_session_txn_state_seconds_bucket{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\", has_lock=\"true\"}[1m])) by (le, type))", + "interval": "", + "legendFormat": "{{type}}-80", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction with lock execution states duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:231", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "bars": false, + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "percentage": false, + "points": false, + "stack": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "description": "How much time transactions spend on each state", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 60 + }, + "id": 255, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:1266", + "alias": "total", + "bars": false, + "lines": true + } + ], + "spaceLength": 10, + "stack": true, + "targets": [ + { + "expr": "sum(rate(tidb_session_txn_state_seconds_sum{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)", + "legendFormat": "{{type}}", + "interval": "", + "exemplar": true, + "hide": false, + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_session_transaction_duration_seconds_sum{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m]))", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "total", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction execution states duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:231", + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "lines": false, + "percentage": false, + "points": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "description": "How many times transactions enter this state in the last minute", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 67 + }, + "id": 254, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "targets": [ + { + "expr": "sum(increase(tidb_session_txn_state_seconds_count{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)", + "legendFormat": "{{type}}", + "interval": "", + "exemplar": true, + "hide": false, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction enter state", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:231", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "lines": false, + "percentage": false, + "points": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "description": "How many times transactions leave this state in the last minute", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 67 + }, + "id": 256, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "targets": [ + { + "expr": "sum(increase(tidb_session_txn_state_seconds_count{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)", + "legendFormat": "{{type}}", + "interval": "", + "exemplar": true, + "hide": false, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction leave state", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + 
"$$hashKey": "object:231", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "lines": false, + "percentage": false, + "points": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "datasource": "${DS_TEST-CLUSTER}", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 67 + }, + "id": 261, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "pluginVersion": "7.5.11", + "pointradius": 2, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "targets": [ + { + "expr": "sum(increase(tidb_session_txn_state_seconds_count{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])) by (type) - on (type) increase(tidb_session_txn_state_entering_count{k8s_cluster=\"$k8s_cluster\",tidb_cluster=\"$tidb_cluster\"}[1m])", + "legendFormat": "{{type}}", + "interval": "", + "exemplar": true, + "hide": false, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction state count change", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:230", + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:231", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + }, + "description": "Transaction leave state minus Transaction enter state", + "dashes": false, + "fillGradient": 0, + "hiddenSeries": false, + "lines": false, + "percentage": false, + "points": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null } ], "repeat": null, @@ -5713,7 +6355,229 @@ "dashes": false, "datasource": "${DS_TEST-CLUSTER}", "decimals": null, - "description": "The time cost of parsing SQL to AST", + "description": "The time cost of parsing SQL to AST", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 156, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": null, + "sortDesc": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(tidb_session_parse_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", 
tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", + "format": "time_series", + "instant": false, + "intervalFactor": 2, + "legendFormat": "{{sql_type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Parse Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": null, + "description": "The time cost of building the query plan", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "hiddenSeries": false, + "id": 154, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": null, + "sortDesc": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(tidb_session_compile_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", + "format": "time_series", + "instant": false, + "intervalFactor": 2, + "legendFormat": "{{sql_type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Compile Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": null, + "description": "The time cost of executing the SQL which does not include the time to get the results of the query .", "editable": true, "error": false, "fieldConfig": { @@ -5727,10 +6591,10 @@ "h": 8, "w": 12, "x": 0, - "y": 5 + "y": 13 }, "hiddenSeries": false, - "id": 156, + "id": 169, "interval": "", "legend": { "alignAsTable": true, @@ -5765,7 +6629,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.95, 
sum(rate(tidb_session_parse_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", + "expr": "histogram_quantile(0.95, sum(rate(tidb_session_execute_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", "format": "time_series", "instant": false, "intervalFactor": 2, @@ -5778,7 +6642,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Parse Duration", + "title": "Execution Duration", "tooltip": { "msResolution": false, "shared": true, @@ -5824,7 +6688,7 @@ "dashes": false, "datasource": "${DS_TEST-CLUSTER}", "decimals": null, - "description": "The time cost of building the query plan", + "description": "TiDB executors using more cpu and memory resources", "editable": true, "error": false, "fieldConfig": { @@ -5838,11 +6702,10 @@ "h": 8, "w": 12, "x": 12, - "y": 5 + "y": 13 }, "hiddenSeries": false, - "id": 154, - "interval": "", + "id": 76, "legend": { "alignAsTable": true, "avg": true, @@ -5876,11 +6739,10 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.95, sum(rate(tidb_session_compile_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", + "expr": "sum(rate(tidb_executor_expensive_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", - "instant": false, "intervalFactor": 2, - "legendFormat": "{{sql_type}}", + "legendFormat": "{{type}}", "refId": "A", "step": 30 } @@ -5889,7 +6751,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Compile Duration", + "title": "Expensive Executors OPS", "tooltip": { "msResolution": false, "shared": true, @@ -5906,10 +6768,9 @@ }, "yaxes": [ { - "decimals": null, - "format": "s", + "format": "short", "label": null, - "logBase": 1, + "logBase": 10, "max": null, "min": "0", "show": true @@ -5920,7 +6781,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -5935,7 +6796,7 @@ "dashes": false, "datasource": "${DS_TEST-CLUSTER}", "decimals": null, - "description": "The time cost of executing the SQL which does not include the time to get the results of the query .", + "description": "TiDB plan cache hit total", "editable": true, "error": false, "fieldConfig": { @@ -5949,11 +6810,10 @@ "h": 8, "w": 12, "x": 0, - "y": 13 + "y": 21 }, "hiddenSeries": false, - "id": 169, - "interval": "", + "id": 91, "legend": { "alignAsTable": true, "avg": true, @@ -5987,11 +6847,10 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.95, sum(rate(tidb_session_execute_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, sql_type))", + "expr": "sum(rate(tidb_server_plan_cache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", - "instant": false, "intervalFactor": 2, - "legendFormat": "{{sql_type}}", + "legendFormat": "{{type}}", "refId": "A", "step": 30 } @@ -6000,7 +6859,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Execution Duration", + "title": "Queries Using Plan Cache OPS", "tooltip": { "msResolution": false, "shared": true, @@ -6017,10 +6876,9 @@ }, "yaxes": [ { - "decimals": null, - "format": "s", + "format": "short", "label": null, - 
"logBase": 1, + "logBase": 2, "max": null, "min": "0", "show": true @@ -6031,7 +6889,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -6046,7 +6904,7 @@ "dashes": false, "datasource": "${DS_TEST-CLUSTER}", "decimals": null, - "description": "TiDB executors using more cpu and memory resources", + "description": "TiDB plan cache hit total", "editable": true, "error": false, "fieldConfig": { @@ -6060,10 +6918,10 @@ "h": 8, "w": 12, "x": 12, - "y": 13 + "y": 21 }, "hiddenSeries": false, - "id": 76, + "id": 258, "legend": { "alignAsTable": true, "avg": true, @@ -6097,7 +6955,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_executor_expensive_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "sum(rate(tidb_server_plan_cache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", @@ -6109,7 +6967,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Expensive Executors OPS", + "title": "Queries Using Plan Cache OPS", "tooltip": { "msResolution": false, "shared": true, @@ -6128,7 +6986,7 @@ { "format": "short", "label": null, - "logBase": 10, + "logBase": 2, "max": null, "min": "0", "show": true @@ -6154,7 +7012,7 @@ "dashes": false, "datasource": "${DS_TEST-CLUSTER}", "decimals": null, - "description": "TiDB plan cache hit total", + "description": "TiDB read table cache hit total", "editable": true, "error": false, "fieldConfig": { @@ -6168,10 +7026,10 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 29 }, "hiddenSeries": false, - "id": 91, + "id": 249, "legend": { "alignAsTable": true, "avg": true, @@ -6205,19 +7063,160 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_server_plan_cache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "exemplar": true, + "expr": "sum(rate(tidb_server_read_from_tablecache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "qps", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Read From Table Cache OPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:469", + "format": "short", + "label": null, + "logBase": 2, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:470", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": null, + "title": "Executor", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 143, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "durations of distsql execution by type", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 122 + }, + "id": 12, + "legend": { + 
"alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "url": "/" + } + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.999, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "999-{{type}}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "99-{{type}}", + "metric": "tidb_distsql_handle_query_duration_seconds_bucket{}", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 30 + "legendFormat": "90-{{type}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "50-{{type}}", + "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Queries Using Plan Cache OPS", + "title": "Distsql Duration", "tooltip": { "msResolution": false, "shared": true, @@ -6234,11 +7233,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 2, "max": null, - "min": "0", + "min": "0.0005", "show": true }, { @@ -6261,49 +7260,33 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "decimals": null, - "description": "TiDB plan cache hit total", + "description": "distsql query handling durations per second", "editable": true, "error": false, - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, "fill": 1, - "fillGradient": 0, "grid": {}, "gridPos": { - "h": 8, + "h": 7, "w": 12, "x": 12, - "y": 21 + "y": 122 }, - "hiddenSeries": false, - "id": 250, + "id": 14, "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideEmpty": true, - "hideZero": true, - "max": true, + "avg": false, + "current": false, + "max": false, "min": false, - "rightSide": true, - "show": true, - "sort": null, - "sortDesc": null, + "rightSide": false, + "show": false, "total": false, - "values": true + "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], "nullPointMode": "null as zero", - "options": { - "alertThreshold": true - }, "percentage": false, - "pluginVersion": "7.5.11", "pointradius": 5, "points": false, "renderer": "flot", @@ -6313,19 +7296,20 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_server_plan_cache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by 
(type)", + "expr": "sum(rate(tidb_distsql_handle_query_duration_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (copr_type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{type}}", + "legendFormat": "{{copr_type}}", + "metric": "tidb_distsql_query_total", "refId": "A", - "step": 30 + "step": 4 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Queries Using Plan Cache OPS", + "title": "Distsql QPS", "tooltip": { "msResolution": false, "shared": true, @@ -6344,9 +7328,9 @@ { "format": "short", "label": null, - "logBase": 2, + "logBase": 1, "max": null, - "min": "0", + "min": 0, "show": true }, { @@ -6369,49 +7353,33 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "decimals": null, - "description": "TiDB read table cache hit total", + "description": "the numebr of distsql partial scan numbers", "editable": true, "error": false, - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, "fill": 1, - "fillGradient": 0, "grid": {}, "gridPos": { - "h": 8, - "w": 12, + "h": 7, + "w": 8, "x": 0, - "y": 29 + "y": 129 }, - "hiddenSeries": false, - "id": 249, + "id": 60, "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "hideEmpty": true, - "hideZero": true, - "max": true, + "avg": false, + "current": false, + "max": false, "min": false, - "rightSide": true, - "show": true, - "sort": null, - "sortDesc": null, + "rightSide": false, + "show": false, "total": false, - "values": true + "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], "nullPointMode": "null as zero", - "options": { - "alertThreshold": true - }, "percentage": false, - "pluginVersion": "7.5.11", "pointradius": 5, "points": false, "renderer": "flot", @@ -6421,21 +7389,20 @@ "steppedLine": false, "targets": [ { - "exemplar": true, - "expr": "sum(rate(tidb_server_read_from_tablecache_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "expr": "sum(rate(tidb_distsql_scan_keys_partial_num_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", "format": "time_series", - "interval": "", "intervalFactor": 2, - "legendFormat": "qps", + "legendFormat": "", + "metric": "tidb_distsql_query_total", "refId": "A", - "step": 30 + "step": 4 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Read From Table Cache OPS", + "title": "Distsql Partial QPS", "tooltip": { "msResolution": false, "shared": true, @@ -6452,16 +7419,14 @@ }, "yaxes": [ { - "$$hashKey": "object:469", "format": "short", "label": null, - "logBase": 2, + "logBase": 1, "max": null, - "min": "0", + "min": 0, "show": true }, { - "$$hashKey": "object:470", "format": "short", "label": null, "logBase": 1, @@ -6474,60 +7439,35 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Executor", - "type": "row" - }, - { - "collapsed": true, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 5 - }, - "id": 143, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "durations of distsql execution by type", - "editable": true, - "error": false, + "description": "the numebr of distsql scan numbers", "fill": 1, - "grid": {}, "gridPos": { "h": 7, - "w": 12, - "x": 0, - "y": 122 + "w": 8, + "x": 8, + "y": 129 }, - "id": 12, + "id": 57, 
"legend": { - "alignAsTable": true, "avg": false, "current": false, "max": false, "min": false, - "rightSide": true, "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 2, - "links": [ - { - "url": "/" - } - ], - "nullPointMode": "null as zero", + "linewidth": 1, + "links": [], + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -6538,35 +7478,24 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.999, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "999-{{type}}", - "refId": "D" - }, - { - "expr": "histogram_quantile(0.99, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "expr": "histogram_quantile(1, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", - "hide": false, "intervalFactor": 2, - "legendFormat": "99-{{type}}", - "metric": "tidb_distsql_handle_query_duration_seconds_bucket{}", - "refId": "A", - "step": 4 + "legendFormat": "100", + "refId": "A" }, { - "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "90-{{type}}", + "legendFormat": "90", "refId": "B" }, { - "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_handle_query_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", + "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", - "interval": "", "intervalFactor": 2, - "legendFormat": "50-{{type}}", + "legendFormat": "50", "refId": "C" } ], @@ -6574,12 +7503,11 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Distsql Duration", + "title": "Scan Keys Num", "tooltip": { - "msResolution": false, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -6591,11 +7519,11 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 2, "max": null, - "min": "0.0005", + "min": null, "show": true }, { @@ -6618,32 +7546,28 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "distsql query handling durations per second", - "editable": true, - "error": false, + "description": "the numebr of distsql partial scan key numbers", "fill": 1, - "grid": {}, "gridPos": { "h": 7, - "w": 12, - "x": 12, - "y": 122 + "w": 8, + "x": 16, + "y": 129 }, - "id": 14, + "id": 58, "legend": { "avg": false, "current": false, "max": false, "min": false, - "rightSide": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], - "nullPointMode": "null as zero", + 
"nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -6654,25 +7578,36 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_distsql_handle_query_duration_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (copr_type)", + "expr": "histogram_quantile(1, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{copr_type}}", - "metric": "tidb_distsql_query_total", - "refId": "A", - "step": 4 + "legendFormat": "100", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "90", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.80, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "50", + "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Distsql QPS", + "title": "Scan Keys Partial Num", "tooltip": { - "msResolution": false, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -6686,9 +7621,9 @@ { "format": "short", "label": null, - "logBase": 1, + "logBase": 2, "max": null, - "min": 0, + "min": null, "show": true }, { @@ -6711,32 +7646,28 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "the numebr of distsql partial scan numbers", - "editable": true, - "error": false, + "description": "distsql partial numbers per query", "fill": 1, - "grid": {}, "gridPos": { "h": 7, "w": 8, "x": 0, - "y": 129 + "y": 136 }, - "id": 60, + "id": 59, "legend": { "avg": false, "current": false, "max": false, "min": false, - "rightSide": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -6747,25 +7678,36 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_distsql_scan_keys_partial_num_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "expr": "histogram_quantile(1, sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "", - "metric": "tidb_distsql_query_total", - "refId": "A", - "step": 4 + "legendFormat": "100", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "90", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "50", + "refId": "C" } 
], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Distsql Partial QPS", + "title": "Partial Num", "tooltip": { - "msResolution": false, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -6779,9 +7721,9 @@ { "format": "short", "label": null, - "logBase": 1, + "logBase": 2, "max": null, - "min": 0, + "min": null, "show": true }, { @@ -6804,28 +7746,35 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "the numebr of distsql scan numbers", + "description": "TiDB coprocessor cache hit, evict and miss number", + "editable": true, + "error": false, "fill": 1, + "grid": {}, "gridPos": { "h": 7, "w": 8, - "x": 8, - "y": 129 + "x": 16, + "y": 163 }, - "id": 57, + "id": 175, "legend": { + "alignAsTable": true, "avg": false, "current": false, - "max": false, + "max": true, "min": false, + "rightSide": true, "show": true, + "sort": "avg", + "sortDesc": true, "total": false, - "values": false + "values": true }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], - "nullPointMode": "null", + "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, @@ -6836,36 +7785,24 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(1, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "100", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "90", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_scan_keys_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "sum(rate(tidb_distsql_copr_cache{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "50", - "refId": "C" + "legendFormat": "{{type}}", + "refId": "A", + "step": 40 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Scan Keys Num", + "title": "Coprocessor Cache", "tooltip": { + "msResolution": false, "shared": true, - "sort": 0, - "value_type": "individual" + "sort": 2, + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -6877,9 +7814,9 @@ }, "yaxes": [ { - "format": "short", + "format": "none", "label": null, - "logBase": 2, + "logBase": 1, "max": null, "min": null, "show": true @@ -6890,7 +7827,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -6904,28 +7841,35 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "the numebr of distsql partial scan key numbers", + "description": "kv storage coprocessor processing durations", + "editable": true, + "error": false, "fill": 1, + "grid": {}, "gridPos": { "h": 7, "w": 8, - "x": 16, - "y": 129 + "x": 8, + "y": 136 }, - "id": 58, + "id": 41, "legend": { + "alignAsTable": true, "avg": false, "current": false, - "max": false, + "max": true, "min": false, + "rightSide": true, "show": true, + "sort": "max", + "sortDesc": true, "total": false, - "values": false + "values": true }, 
"lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], - "nullPointMode": "null", + "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, @@ -6936,36 +7880,24 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(1, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "100", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "90", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.80, sum(rate(tidb_distsql_scan_keys_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "histogram_quantile(0.999, sum(rate(tidb_tikvclient_cop_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, instance))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "50", - "refId": "C" + "legendFormat": "{{instance}}", + "refId": "A", + "step": 4 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Scan Keys Partial Num", + "title": "Coprocessor Seconds 999", "tooltip": { + "msResolution": false, "shared": true, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -6977,11 +7909,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, - "logBase": 2, + "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -6997,35 +7929,58 @@ "align": false, "alignLevel": null } - }, + } + ], + "repeat": null, + "title": "Distsql", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 144, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "distsql partial numbers per query", + "description": "kv backoff time durations by type", + "editable": true, + "error": false, "fill": 1, + "grid": {}, "gridPos": { "h": 7, - "w": 8, + "w": 12, "x": 0, - "y": 136 + "y": 7 }, - "id": 59, + "id": 6, "legend": { + "alignAsTable": false, "avg": false, "current": false, - "max": false, + "max": true, "min": false, + "rightSide": false, "show": true, + "sort": null, + "sortDesc": null, "total": false, - "values": false + "values": true }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], - "nullPointMode": "null", + "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, @@ -7036,24 +7991,25 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(1, sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "histogram_quantile(0.999, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "100", - "refId": "A" + "legendFormat": "999", + "refId": "A", + "step": 40 }, { - "expr": "histogram_quantile(0.90, 
sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "90", + "legendFormat": "99", "refId": "B" }, { - "expr": "histogram_quantile(0.50, sum(rate(tidb_distsql_partial_num_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "histogram_quantile(0.80, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "50", + "legendFormat": "80", "refId": "C" } ], @@ -7061,11 +8017,12 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Partial Num", + "title": "KV Backoff Duration", "tooltip": { + "msResolution": false, "shared": true, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -7077,9 +8034,9 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, - "logBase": 2, + "logBase": 1, "max": null, "min": null, "show": true @@ -7104,33 +8061,34 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "TiDB coprocessor cache hit, evict and miss number", + "decimals": 2, + "description": "kv region error times", "editable": true, "error": false, - "fill": 1, + "fill": 0, "grid": {}, "gridPos": { "h": 7, - "w": 8, - "x": 16, - "y": 163 + "w": 12, + "x": 12, + "y": 7 }, - "id": 175, + "id": 11, "legend": { "alignAsTable": true, "avg": false, - "current": false, + "current": true, + "hideEmpty": true, + "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, - "sort": "avg", - "sortDesc": true, "total": false, "values": true }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], "nullPointMode": "null as zero", "percentage": false, @@ -7143,23 +8101,32 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_distsql_copr_cache{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "sum(rate(tidb_tikvclient_region_err_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", + "metric": "tidb_server_session_execute_parse_duration_count", "refId": "A", "step": 40 + }, + { + "expr": "sum(rate(tidb_tikvclient_region_err_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}{EXTERNAL_LABELtype=\"server_is_busy\"}[1m]))", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "sum", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Coprocessor Cache", + "title": "TiClient Region Error OPS", "tooltip": { "msResolution": false, "shared": true, - "sort": 2, + "sort": 0, "value_type": "cumulative" }, "type": "graph", @@ -7172,11 +8139,11 @@ }, "yaxes": [ { - "format": "none", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -7185,7 +8152,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { 
@@ -7199,29 +8166,26 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv storage coprocessor processing durations", - "editable": true, - "error": false, + "description": "kv storage backoff times", "fill": 1, - "grid": {}, "gridPos": { "h": 7, - "w": 8, - "x": 8, - "y": 136 + "w": 12, + "x": 0, + "y": 14 }, - "id": 41, + "id": 53, "legend": { "alignAsTable": true, "avg": false, "current": false, + "hideEmpty": true, + "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, - "sort": "max", - "sortDesc": true, - "total": false, + "total": true, "values": true }, "lines": true, @@ -7238,24 +8202,22 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.999, sum(rate(tidb_tikvclient_cop_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, instance))", + "expr": "sum(rate(tidb_tikvclient_backoff_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A", - "step": 4 + "legendFormat": "{{type}}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Coprocessor Seconds 999", + "title": "KV Backoff OPS", "tooltip": { - "msResolution": false, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -7267,11 +8229,11 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -7287,30 +8249,14 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Distsql", - "type": "row" - }, - { - "collapsed": true, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 6 - }, - "id": 144, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv backoff time durations by type", + "description": "lock resolve times", "editable": true, "error": false, "fill": 1, @@ -7318,20 +8264,20 @@ "gridPos": { "h": 7, "w": 12, - "x": 0, - "y": 7 + "x": 12, + "y": 14 }, - "id": 6, + "id": 32, "legend": { - "alignAsTable": false, + "alignAsTable": true, "avg": false, "current": false, + "hideEmpty": true, + "hideZero": true, "max": true, "min": false, - "rightSide": false, + "rightSide": true, "show": true, - "sort": null, - "sortDesc": null, "total": false, "values": true }, @@ -7349,33 +8295,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.999, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", + "expr": "sum(rate(tidb_tikvclient_lock_resolver_actions_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "999", + "legendFormat": "{{type}}", + "metric": "tidb_tikvclient_lock_resolver_actions_total{}", "refId": "A", "step": 40 - }, - { - "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "99", - "refId": "B" - }, - { - "expr": 
"histogram_quantile(0.80, sum(rate(tidb_tikvclient_backoff_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "80", - "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Backoff Duration", + "title": "Lock Resolve OPS", "tooltip": { "msResolution": false, "shared": true, @@ -7392,11 +8325,11 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -7419,23 +8352,22 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "decimals": 2, - "description": "kv region error times", + "description": "lock cleanup failed times and safe point update times", "editable": true, "error": false, - "fill": 0, + "fill": 1, "grid": {}, "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 7 + "x": 0, + "y": 21 }, - "id": 11, + "id": 84, "legend": { "alignAsTable": true, "avg": false, - "current": true, + "current": false, "hideEmpty": true, "hideZero": true, "max": true, @@ -7446,7 +8378,7 @@ "values": true }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], "nullPointMode": "null as zero", "percentage": false, @@ -7459,20 +8391,19 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_region_err_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "sum(rate(tidb_tikvclient_lock_cleanup_task_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{type}}", - "metric": "tidb_server_session_execute_parse_duration_count", + "legendFormat": "cleanup_secondary_failure_{{type}}", + "metric": "tidb_tikvclient_lock_resolver_actions_total{}", "refId": "A", "step": 40 }, { - "expr": "sum(rate(tidb_tikvclient_region_err_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}{EXTERNAL_LABELtype=\"server_is_busy\"}[1m]))", + "expr": "sum(rate(tidb_tikvclient_load_safepoint_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", type=\"fail\"}[1m]))", "format": "time_series", - "hide": true, "intervalFactor": 2, - "legendFormat": "sum", + "legendFormat": "load_safepoint_failure", "refId": "B" } ], @@ -7480,7 +8411,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "TiClient Region Error OPS", + "title": "Other Errors OPS", "tooltip": { "msResolution": false, "shared": true, @@ -7524,32 +8455,37 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv storage backoff times", + "description": "This metric shows the reasons of replica selector failure (which needs a backoff).", + "editable": true, + "error": false, "fill": 1, + "fillGradient": 0, + "grid": {}, "gridPos": { "h": 7, "w": 12, - "x": 0, - "y": 14 + "x": 12, + "y": 21 }, - "id": 53, + "id": 223, "legend": { "alignAsTable": true, "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, + "current": true, "max": true, "min": false, "rightSide": true, "show": true, - "total": true, + "total": false, "values": true }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, "percentage": false, "pointradius": 5, "points": false, 
@@ -7560,9 +8496,9 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_backoff_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "sum(rate(tidb_tikvclient_replica_selector_failure_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", "format": "time_series", - "intervalFactor": 2, + "intervalFactor": 1, "legendFormat": "{{type}}", "refId": "A" } @@ -7571,8 +8507,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Backoff OPS", + "title": "Replica Selector Failure Per Second", "tooltip": { + "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" @@ -7591,7 +8528,7 @@ "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -7607,31 +8544,45 @@ "align": false, "alignLevel": null } - }, + } + ], + "repeat": null, + "title": "KV Errors", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 145, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "lock resolve times", + "description": "kv request total by instance and command type", "editable": true, "error": false, "fill": 1, "grid": {}, "gridPos": { "h": 7, - "w": 12, - "x": 12, - "y": 14 + "w": 8, + "x": 0, + "y": 9 }, - "id": 32, + "id": 172, "legend": { "alignAsTable": true, "avg": false, "current": false, - "hideEmpty": true, - "hideZero": true, "max": true, "min": false, "rightSide": true, @@ -7653,11 +8604,10 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_lock_resolver_actions_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "sum(rate(tidb_tikvclient_request_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{type}}", - "metric": "tidb_tikvclient_lock_resolver_actions_total{}", + "legendFormat": "{{instance}}-{{type}}", "refId": "A", "step": 40 } @@ -7666,9 +8616,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Lock Resolve OPS", + "title": "KV Request OPS", "tooltip": { - "msResolution": false, + "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" @@ -7710,33 +8660,33 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "lock cleanup failed times and safe point update times", + "description": "kv requests durations by store", "editable": true, "error": false, "fill": 1, "grid": {}, "gridPos": { "h": 7, - "w": 12, - "x": 0, - "y": 21 + "w": 8, + "x": 8, + "y": 9 }, - "id": 84, + "id": 48, "legend": { "alignAsTable": true, "avg": false, "current": false, - "hideEmpty": true, - "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, + "sort": "max", + "sortDesc": true, "total": false, "values": true }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], "nullPointMode": "null as zero", "percentage": false, @@ -7749,32 +8699,24 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_lock_cleanup_task_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "histogram_quantile(0.99, 
sum(rate(tidb_tikvclient_request_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", store!=\"0\"}[1m])) by (le, store))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "cleanup_secondary_failure_{{type}}", - "metric": "tidb_tikvclient_lock_resolver_actions_total{}", + "legendFormat": "store-{{store}}", "refId": "A", "step": 40 - }, - { - "expr": "sum(rate(tidb_tikvclient_load_safepoint_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", type=\"fail\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "load_safepoint_failure", - "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Other Errors OPS", + "title": "KV Request Duration 99 by store", "tooltip": { "msResolution": false, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -7786,11 +8728,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -7813,27 +8755,28 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "This metric shows the reasons of replica selector failure (which needs a backoff).", + "description": "kv request durations by request type", "editable": true, "error": false, "fill": 1, - "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, - "w": 12, - "x": 12, - "y": 21 + "w": 8, + "x": 16, + "y": 9 }, - "id": 223, + "id": 30, "legend": { "alignAsTable": true, "avg": false, - "current": true, + "current": false, "max": true, "min": false, "rightSide": true, "show": true, + "sort": "max", + "sortDesc": true, "total": false, "values": true }, @@ -7841,9 +8784,6 @@ "linewidth": 1, "links": [], "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, "percentage": false, "pointradius": 5, "points": false, @@ -7854,18 +8794,19 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_replica_selector_failure_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type)", + "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_request_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", store!=\"0\"}[1m])) by (le,type))", "format": "time_series", - "intervalFactor": 1, + "intervalFactor": 2, "legendFormat": "{{type}}", - "refId": "A" + "refId": "A", + "step": 40 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Replica Selector Failure Per Second", + "title": "KV Request Duration 99 by type", "tooltip": { "msResolution": false, "shared": true, @@ -7882,11 +8823,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -7902,30 +8843,14 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "KV Errors", - "type": "row" - }, - { - "collapsed": true, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 7 - }, - "id": 145, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv request total by instance and command type", + "description": "kv requests that's forwarded by different stores", "editable": true, "error": false, "fill": 1, @@ -7934,9 +8859,9 
@@ "h": 7, "w": 8, "x": 0, - "y": 9 + "y": 16 }, - "id": 172, + "id": 219, "legend": { "alignAsTable": true, "avg": false, @@ -7962,10 +8887,10 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_request_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type)", + "expr": "sum(rate(tidb_tikvclient_forward_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (from_store, to_store, result)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{instance}}-{{type}}", + "legendFormat": "{{from_store}}-to-{{to_store}}-{{result}}", "refId": "A", "step": 40 } @@ -7974,7 +8899,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Request OPS", + "title": "KV Request Forwarding OPS", "tooltip": { "msResolution": true, "shared": true, @@ -8018,7 +8943,7 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv requests durations by store", + "description": "kv requests that's forwarded by different stores, grouped by request type", "editable": true, "error": false, "fill": 1, @@ -8027,9 +8952,9 @@ "h": 7, "w": 8, "x": 8, - "y": 9 + "y": 16 }, - "id": 48, + "id": 220, "legend": { "alignAsTable": true, "avg": false, @@ -8038,13 +8963,11 @@ "min": false, "rightSide": true, "show": true, - "sort": "max", - "sortDesc": true, "total": false, "values": true }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], "nullPointMode": "null as zero", "percentage": false, @@ -8057,10 +8980,10 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_request_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", store!=\"0\"}[1m])) by (le, store))", + "expr": "sum(rate(tidb_tikvclient_forward_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type, result)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "store-{{store}}", + "legendFormat": "{{type}}-{{result}}", "refId": "A", "step": 40 } @@ -8069,12 +8992,12 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Request Duration 99 by store", + "title": "KV Request Forwarding OPS by Type", "tooltip": { - "msResolution": false, + "msResolution": true, "shared": true, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -8086,11 +9009,11 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -8113,35 +9036,28 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv request durations by request type", - "editable": true, - "error": false, + "description": "TiDB successful region cache operations count", "fill": 1, - "grid": {}, "gridPos": { "h": 7, "w": 8, "x": 16, - "y": 9 + "y": 16 }, - "id": 30, + "id": 164, "legend": { - "alignAsTable": true, "avg": false, "current": false, - "max": true, + "max": false, "min": false, - "rightSide": true, "show": true, - "sort": "max", - "sortDesc": true, "total": false, - "values": true + "values": false }, "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -8152,21 +9068,20 @@ "steppedLine": false, "targets": [ { - 
"expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_request_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", store!=\"0\"}[1m])) by (le,type))", + "expr": "sum(rate(tidb_tikvclient_region_cache_operations_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", result=\"ok\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", "refId": "A", - "step": 40 + "step": 30 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Request Duration 99 by type", + "title": "Region Cache OK OPS", "tooltip": { - "msResolution": false, "shared": true, "sort": 0, "value_type": "individual" @@ -8181,7 +9096,7 @@ }, "yaxes": [ { - "format": "s", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -8208,33 +9123,28 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv requests that's forwarded by different stores", - "editable": true, - "error": false, + "description": "TiDB error region cache operations count", "fill": 1, - "grid": {}, "gridPos": { "h": 7, "w": 8, "x": 0, - "y": 15 + "y": 23 }, - "id": 219, + "id": 250, "legend": { - "alignAsTable": true, "avg": false, "current": false, - "max": true, + "max": false, "min": false, - "rightSide": true, "show": true, "total": false, - "values": true + "values": false }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], - "nullPointMode": "null as zero", + "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, @@ -8245,24 +9155,23 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_forward_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (from_store, to_store, result)", + "expr": "sum(rate(tidb_tikvclient_region_cache_operations_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", result=\"err\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{from_store}}-to-{{to_store}}-{{result}}", + "legendFormat": "{{type}}-err", "refId": "A", - "step": 40 + "step": 30 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Request Forwarding OPS", + "title": "Region Cache Error OPS", "tooltip": { - "msResolution": true, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -8278,7 +9187,7 @@ "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -8301,33 +9210,39 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "kv requests that's forwarded by different stores, grouped by request type", - "editable": true, - "error": false, + "description": "TiDB loading region cache durations", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "fill": 1, - "grid": {}, + "fillGradient": 0, "gridPos": { "h": 7, "w": 8, "x": 8, - "y": 15 + "y": 23 }, - "id": 220, + "hiddenSeries": false, + "id": 262, "legend": { "alignAsTable": true, "avg": false, "current": false, - "max": true, + "max": false, "min": false, "rightSide": true, "show": true, "total": false, - "values": true + "values": false }, "lines": true, - "linewidth": 2, + "linewidth": 1, "links": [], - "nullPointMode": "null as zero", + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": 
false, "pointradius": 5, "points": false, @@ -8338,24 +9253,33 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_tikvclient_forward_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (type, result)", + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_load_region_cache_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", "format": "time_series", + "interval": "", "intervalFactor": 2, - "legendFormat": "{{type}}-{{result}}", - "refId": "A", - "step": 40 + "legendFormat": "99-{{type}}", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_load_region_cache_seconds_sum{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type) / sum(rate(tidb_tikvclient_load_region_cache_seconds_count{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "avg-{{type}}", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "KV Request Forwarding OPS by Type", + "title": "Load Region Duration", "tooltip": { - "msResolution": true, "shared": true, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -8367,7 +9291,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -12770,105 +13694,18 @@ "refId": "B" }, { - "expr": "histogram_quantile(0.80, sum(rate(tidb_autoid_operation_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "80-{{type}}", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "AutoID Duration", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 2, - "max": null, - "min": "0.001", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_TEST-CLUSTER}", - "description": "TiDB region cache operations count", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 21 - }, - "id": 164, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(tidb_tikvclient_region_cache_operations_total{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\", result=\"err\"}[1m])) by (type)", + "expr": "histogram_quantile(0.80, sum(rate(tidb_autoid_operation_duration_seconds_bucket{k8s_cluster=\"$k8s_cluster\", 
tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (le, type))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 30 + "legendFormat": "80-{{type}}", + "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Region Cache Error OPS", + "title": "AutoID Duration", "tooltip": { "shared": true, "sort": 0, @@ -12884,11 +13721,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, - "logBase": 1, + "logBase": 2, "max": null, - "min": null, + "min": "0.001", "show": true }, { @@ -12916,8 +13753,8 @@ "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 21 + "x": 0, + "y": 42 }, "id": 52, "legend": { @@ -14887,6 +15724,277 @@ ], "title": "TopSQL", "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 257, + "panels": [ + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "kv request count by instance and command source", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 15 + }, + "hiddenSeries": false, + "id": 259, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "KV Requst Count", + "bars": false, + "color": "#FADE2A", + "lines": true, + "linewidth": 1, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type, source)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}-{{type}}-{{source}}", + "refId": "A", + "step": 40 + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "KV Requst Count", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request OPS by source", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:62", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:63", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + 
"description": "kv request time by instance and command source", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 15 + }, + "hiddenSeries": false, + "id": 260, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.11", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "KV Requst Time", + "bars": false, + "color": "#FADE2A", + "lines": true, + "linewidth": 1, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_time_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m])) by (instance, type, source)", + "format": "time_series", + "interval": "", + "legendFormat": "{{instance}}-{{type}}-{{source}}", + "refId": "A", + "step": 40 + }, + { + "exemplar": true, + "expr": "sum(rate(tidb_tikvclient_request_time_counter{k8s_cluster=\"$k8s_cluster\", tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}[1m]))", + "format": "time_series", + "interval": "", + "legendFormat": "KV Requst Time", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request Time by source", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:62", + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:63", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "SourceSQL", + "type": "row" } ], "refresh": "30s", diff --git a/metrics/metrics.go b/metrics/metrics.go index 565790480c6c7..19809bd9c85d2 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -189,6 +189,8 @@ func RegisterMetrics() { prometheus.MustRegister(StatsCacheLRUCounter) prometheus.MustRegister(StatsCacheLRUGauge) prometheus.MustRegister(StatsHealthyGauge) + prometheus.MustRegister(TxnStatusEnteringCounter) + prometheus.MustRegister(TxnDurationHistogram) tikvmetrics.InitMetrics(TiDB, TiKVClient) tikvmetrics.RegisterMetrics() diff --git a/metrics/session.go b/metrics/session.go index 4073644342e22..4065d57baf992 100644 --- a/metrics/session.go +++ b/metrics/session.go @@ -135,6 +135,22 @@ var ( Name: "non_transactional_delete_count", Help: "Counter of non-transactional delete", }) + TxnStatusEnteringCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "tidb", + Subsystem: "session", + Name: "txn_state_entering_count", + Help: "How many times transactions enter this state", + }, []string{LblType}, + ) + TxnDurationHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: 
"session", + Name: "txn_state_seconds", + Help: "Bucketed histogram of different states of a transaction.", + Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days + }, []string{LblType, LblHasLock}) ) // Label constants. @@ -165,4 +181,10 @@ const ( LblVersion = "version" LblHash = "hash" LblCTEType = "cte_type" + LblIdle = "idle" + LblRunning = "executing_sql" + LblLockWaiting = "waiting_for_lock" + LblCommitting = "committing" + LblRollingBack = "rolling_back" + LblHasLock = "has_lock" ) diff --git a/metrics/telemetry.go b/metrics/telemetry.go index 10bf1ac8b624e..79bcbf7c9f9a1 100644 --- a/metrics/telemetry.go +++ b/metrics/telemetry.go @@ -28,6 +28,13 @@ var ( Name: "non_recursive_cte_usage", Help: "Counter of usage of CTE", }, []string{LblCTEType}) + TelemetryMultiSchemaChangeCnt = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "tidb", + Subsystem: "telemetry", + Name: "multi_schema_change_usage", + Help: "Counter of usage of multi-schema change", + }) ) // readCounter reads the value of a prometheus.Counter. @@ -68,6 +75,25 @@ func GetCTECounter() CTEUsageCounter { } } +// MultiSchemaChangeUsageCounter records the usages of multi-schema change. +type MultiSchemaChangeUsageCounter struct { + MultiSchemaChangeUsed int64 `json:"multi_schema_change_used"` +} + +// Sub returns the difference of two counters. +func (c MultiSchemaChangeUsageCounter) Sub(rhs MultiSchemaChangeUsageCounter) MultiSchemaChangeUsageCounter { + return MultiSchemaChangeUsageCounter{ + MultiSchemaChangeUsed: c.MultiSchemaChangeUsed - rhs.MultiSchemaChangeUsed, + } +} + +// GetMultiSchemaCounter gets the TxnCommitCounter. +func GetMultiSchemaCounter() MultiSchemaChangeUsageCounter { + return MultiSchemaChangeUsageCounter{ + MultiSchemaChangeUsed: readCounter(TelemetryMultiSchemaChangeCnt), + } +} + // NonTransactionalStmtCounter records the usages of non-transactional statements. type NonTransactionalStmtCounter struct { DeleteCount int64 `json:"delete"` diff --git a/owner/manager_test.go b/owner/manager_test.go index 3668c4d060ad2..24627750d855d 100644 --- a/owner/manager_test.go +++ b/owner/manager_test.go @@ -16,7 +16,6 @@ package owner_test import ( "context" - goctx "context" "fmt" "runtime" "testing" @@ -54,7 +53,7 @@ func TestSingle(t *testing.T) { defer cluster.Terminate(t) client := cluster.RandClient() - ctx := goctx.Background() + ctx := context.Background() ic := infoschema.NewCache(2) ic.Insert(infoschema.MockInfoSchemaWithSchemaVer(nil, 0), 0) d := NewDDL( @@ -69,13 +68,13 @@ func TestSingle(t *testing.T) { require.True(t, isOwner) // test for newSession failed - ctx, cancel := goctx.WithCancel(ctx) + ctx, cancel := context.WithCancel(ctx) manager := owner.NewOwnerManager(ctx, client, "ddl", "ddl_id", DDLOwnerKey) cancel() err = manager.CampaignOwner() comment := fmt.Sprintf("campaigned result don't match, err %v", err) - require.True(t, terror.ErrorEqual(err, goctx.Canceled) || terror.ErrorEqual(err, goctx.DeadlineExceeded), comment) + require.True(t, terror.ErrorEqual(err, context.Canceled) || terror.ErrorEqual(err, context.DeadlineExceeded), comment) isOwner = checkOwner(d, true) require.True(t, isOwner) @@ -88,7 +87,7 @@ func TestSingle(t *testing.T) { time.Sleep(200 * time.Millisecond) // err is ok to be not nil since we canceled the manager. 
- ownerID, _ := manager.GetOwnerID(goctx.Background()) + ownerID, _ := manager.GetOwnerID(context.Background()) require.Equal(t, "", ownerID) } @@ -118,7 +117,7 @@ func TestCluster(t *testing.T) { ic := infoschema.NewCache(2) ic.Insert(infoschema.MockInfoSchemaWithSchemaVer(nil, 0), 0) d := NewDDL( - goctx.Background(), + context.Background(), WithEtcdClient(cli), WithStore(store), WithLease(testLease), @@ -136,7 +135,7 @@ func TestCluster(t *testing.T) { ic2 := infoschema.NewCache(2) ic2.Insert(infoschema.MockInfoSchemaWithSchemaVer(nil, 0), 0) d1 := NewDDL( - goctx.Background(), + context.Background(), WithEtcdClient(cli1), WithStore(store), WithLease(testLease), @@ -163,7 +162,7 @@ func TestCluster(t *testing.T) { ic3 := infoschema.NewCache(2) ic3.Insert(infoschema.MockInfoSchemaWithSchemaVer(nil, 0), 0) d3 := NewDDL( - goctx.Background(), + context.Background(), WithEtcdClient(cli3), WithStore(store), WithLease(testLease), @@ -186,7 +185,7 @@ func TestCluster(t *testing.T) { election := concurrency.NewElection(session, DDLOwnerKey) logPrefix := fmt.Sprintf("[ddl] %s ownerManager %s", DDLOwnerKey, "useless id") logCtx := logutil.WithKeyValue(context.Background(), "owner info", logPrefix) - _, err = owner.GetOwnerInfo(goctx.Background(), logCtx, election, "useless id") + _, err = owner.GetOwnerInfo(context.Background(), logCtx, election, "useless id") require.Truef(t, terror.ErrorEqual(err, concurrency.ErrElectionNoLeader), "get owner info result don't match, err %v", err) } @@ -213,10 +212,10 @@ func deleteLeader(cli *clientv3.Client, prefixKey string) error { _ = session.Close() }() election := concurrency.NewElection(session, prefixKey) - resp, err := election.Leader(goctx.Background()) + resp, err := election.Leader(context.Background()) if err != nil { return errors.Trace(err) } - _, err = cli.Delete(goctx.Background(), string(resp.Kvs[0].Key)) + _, err = cli.Delete(context.Background(), string(resp.Kvs[0].Key)) return errors.Trace(err) } diff --git a/parser/ast/ddl_test.go b/parser/ast/ddl_test.go index beb3a27b97370..25af0b1fe0485 100644 --- a/parser/ast/ddl_test.go +++ b/parser/ast/ddl_test.go @@ -16,10 +16,9 @@ package ast_test import ( "testing" - "github.com/stretchr/testify/require" - . "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/format" + "github.com/stretchr/testify/require" ) func TestDDLVisitorCover(t *testing.T) { diff --git a/parser/ast/misc.go b/parser/ast/misc.go index 3ec7d5c753ba8..08e14575c53eb 100644 --- a/parser/ast/misc.go +++ b/parser/ast/misc.go @@ -22,7 +22,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/parser/auth" - "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/format" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -619,7 +618,6 @@ const ( func (n CompletionType) Restore(ctx *format.RestoreCtx) error { switch n { case CompletionTypeDefault: - break case CompletionTypeChain: ctx.WriteKeyWord(" AND CHAIN") case CompletionTypeRelease: @@ -3620,27 +3618,6 @@ type TextString struct { IsBinaryLiteral bool } -// TransformTextStrings converts a slice of TextString to strings. -// This is only used by enum/set strings. -func TransformTextStrings(ts []*TextString, _ string) []string { - // The UTF-8 encoding rather than other encoding is used - // because parser is not possible to determine the "real" - // charset that a binary literal string should be converted to. 
- enc := charset.EncodingUTF8Impl - ret := make([]string, 0, len(ts)) - for _, t := range ts { - if !t.IsBinaryLiteral { - ret = append(ret, t.Value) - } else { - // Validate the binary literal string. - // See https://github.com/pingcap/tidb/issues/30740. - r, _ := enc.Transform(nil, charset.HackSlice(t.Value), charset.OpDecodeNoErr) - ret = append(ret, charset.HackString(r)) - } - } - return ret -} - type BinaryLiteral interface { ToString() string } diff --git a/parser/ast/util_test.go b/parser/ast/util_test.go index 015f5dc5cc4eb..b43fac39ae481 100644 --- a/parser/ast/util_test.go +++ b/parser/ast/util_test.go @@ -147,6 +147,8 @@ func (checker *nodeTextCleaner) Enter(in Node) (out Node, skipChildren bool) { } case *Join: node.ExplicitParens = false + case *ColumnDef: + node.Tp.CleanElemIsBinaryLit() } return in, false } diff --git a/parser/auth/mysql_native_password.go b/parser/auth/mysql_native_password.go index d781626a68c0f..05c6127c21991 100644 --- a/parser/auth/mysql_native_password.go +++ b/parser/auth/mysql_native_password.go @@ -15,7 +15,7 @@ package auth import ( "bytes" - "crypto/sha1" + "crypto/sha1" //nolint: gosec "encoding/hex" "fmt" @@ -39,6 +39,7 @@ import ( // check(candidate_hash2==hash_stage2) // // this three steps are done in check_scramble() func CheckScrambledPassword(salt, hpwd, auth []byte) bool { + //nolint: gosec crypt := sha1.New() _, err := crypt.Write(salt) terror.Log(errors.Trace(err)) @@ -58,6 +59,7 @@ func CheckScrambledPassword(salt, hpwd, auth []byte) bool { // Sha1Hash is an util function to calculate sha1 hash. func Sha1Hash(bs []byte) []byte { + //nolint: gosec crypt := sha1.New() _, err := crypt.Write(bs) terror.Log(errors.Trace(err)) diff --git a/parser/charset/BUILD.bazel b/parser/charset/BUILD.bazel index 564e0d8e29297..d201d51157ed6 100644 --- a/parser/charset/BUILD.bazel +++ b/parser/charset/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "//parser/terror", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_log//:log", + "@org_golang_x_exp//slices", "@org_golang_x_text//encoding", "@org_golang_x_text//encoding/charmap", "@org_golang_x_text//encoding/japanese", diff --git a/parser/charset/charset.go b/parser/charset/charset.go index 2987dd3e24b44..6c02f1bcd1f43 100644 --- a/parser/charset/charset.go +++ b/parser/charset/charset.go @@ -14,7 +14,6 @@ package charset import ( - "sort" "strings" "github.com/pingcap/errors" @@ -22,6 +21,7 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "go.uber.org/zap" + "golang.org/x/exp/slices" ) var ( @@ -89,8 +89,8 @@ func GetSupportedCharsets() []*Charset { } // sort charset by name. 
- sort.Slice(charsets, func(i, j int) bool { - return charsets[i].Name < charsets[j].Name + slices.SortFunc(charsets, func(i, j *Charset) bool { + return i.Name < j.Name }) return charsets } diff --git a/parser/charset/encoding_latin1.go b/parser/charset/encoding_latin1.go index db7b66ed101af..38f9bb601ac4e 100644 --- a/parser/charset/encoding_latin1.go +++ b/parser/charset/encoding_latin1.go @@ -15,6 +15,7 @@ package charset import ( "bytes" + "golang.org/x/text/encoding" ) diff --git a/parser/go.mod b/parser/go.mod index 02b141eed3261..1f49f53d36814 100644 --- a/parser/go.mod +++ b/parser/go.mod @@ -10,9 +10,26 @@ require ( github.com/stretchr/testify v1.7.0 go.uber.org/goleak v1.1.10 go.uber.org/zap v1.18.1 - golang.org/x/text v0.3.6 + golang.org/x/exp v0.0.0-20220428152302-39d4317da171 + golang.org/x/text v0.3.7 modernc.org/parser v1.0.2 modernc.org/y v1.0.1 ) -go 1.13 +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/lint v0.0.0-20190930215403-16217165b5de // indirect + golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + modernc.org/golex v1.0.1 // indirect + modernc.org/mathutil v1.4.1 // indirect + modernc.org/sortutil v1.0.0 // indirect + modernc.org/strutil v1.1.0 // indirect +) + +go 1.18 diff --git a/parser/go.sum b/parser/go.sum index 3ea7bfd55e941..267fe82580882 100644 --- a/parser/go.sum +++ b/parser/go.sum @@ -48,20 +48,23 @@ go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 h1:0c3L82FDQ5rt1bjTBlchS8t6RQ6299/+5bWMnRLh+uI= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= diff --git a/parser/goyacc/BUILD.bazel b/parser/goyacc/BUILD.bazel index 51d0991ed1b0c..e8582c1c4a2ab 100644 --- a/parser/goyacc/BUILD.bazel +++ b/parser/goyacc/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "@com_github_cznic_sortutil//:sortutil", "@com_github_cznic_strutil//:strutil", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", "@org_modernc_parser//yacc", "@org_modernc_y//:y", ], diff --git a/parser/goyacc/main.go b/parser/goyacc/main.go index 22d78f2998e91..93fc90efb3afe 100644 --- a/parser/goyacc/main.go +++ b/parser/goyacc/main.go @@ -144,6 +144,7 @@ import ( "github.com/cznic/mathutil" "github.com/cznic/sortutil" "github.com/cznic/strutil" + "golang.org/x/exp/slices" parser "modernc.org/parser/yacc" "modernc.org/y" ) @@ -396,7 +397,7 @@ type %[1]sXError struct { } nsyms[nm] = sym } - sort.Strings(a) + slices.Sort(a) mustFormat(f, "\nconst (%i\n") for _, v := range a { nm := v diff --git a/parser/hintparser_test.go b/parser/hintparser_test.go index 5c252b2d4af78..2c9156f579128 100644 --- a/parser/hintparser_test.go +++ b/parser/hintparser_test.go @@ -16,12 +16,11 @@ package parser_test import ( "testing" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" + "github.com/stretchr/testify/require" ) func TestParseHint(t *testing.T) { diff --git a/parser/lexer.go b/parser/lexer.go index 659fc14dd9235..bcddc5f5bfcea 100644 --- a/parser/lexer.go +++ b/parser/lexer.go @@ -478,7 +478,6 @@ func startWithSlash(s *Scanner) (tok int, pos Pos, lit string) { } case 'M': // '/*M' maybe MariaDB-specific comments // no special treatment for now. - break case '+': // '/*+' optimizer hints // See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html @@ -502,7 +501,6 @@ func startWithSlash(s *Scanner) (tok int, pos Pos, lit string) { currentCharIsStar = true default: - break } // standard C-like comment. read until we see '*/' then drop it. 
@@ -574,7 +572,7 @@ func startWithAt(s *Scanner) (tok int, pos Pos, lit string) { tok, lit = doubleAtIdentifier, s.r.data(&pos) } case invalid: - break + return default: tok = singleAtIdentifier } diff --git a/parser/model/ddl.go b/parser/model/ddl.go index 549a8119e6b33..8d2a80c64ccdf 100644 --- a/parser/model/ddl.go +++ b/parser/model/ddl.go @@ -67,8 +67,8 @@ const ( ActionCreateSequence ActionType = 34 ActionAlterSequence ActionType = 35 ActionDropSequence ActionType = 36 - ActionAddColumns ActionType = 37 - ActionDropColumns ActionType = 38 + ActionAddColumns ActionType = 37 // Deprecated, we use ActionMultiSchemaChange instead. + ActionDropColumns ActionType = 38 // Deprecated, we use ActionMultiSchemaChange instead. ActionModifyTableAutoIdCache ActionType = 39 ActionRebaseAutoRandomBase ActionType = 40 ActionAlterIndexVisibility ActionType = 41 @@ -82,7 +82,7 @@ const ( __DEPRECATED_ActionAlterTableAlterPartition ActionType = 46 ActionRenameTables ActionType = 47 - ActionDropIndexes ActionType = 48 + ActionDropIndexes ActionType = 48 // Deprecated, we use ActionMultiSchemaChange instead. ActionAlterTableAttributes ActionType = 49 ActionAlterTablePartitionAttributes ActionType = 50 ActionCreatePlacementPolicy ActionType = 51 @@ -137,8 +137,6 @@ var actionMap = map[ActionType]string{ ActionCreateSequence: "create sequence", ActionAlterSequence: "alter sequence", ActionDropSequence: "drop sequence", - ActionAddColumns: "add multi-columns", - ActionDropColumns: "drop multi-columns", ActionModifyTableAutoIdCache: "modify auto id cache", ActionRebaseAutoRandomBase: "rebase auto_random ID", ActionAlterIndexVisibility: "alter index visibility", @@ -146,7 +144,6 @@ var actionMap = map[ActionType]string{ ActionAddCheckConstraint: "add check constraint", ActionDropCheckConstraint: "drop check constraint", ActionAlterCheckConstraint: "alter check constraint", - ActionDropIndexes: "drop multi-indexes", ActionAlterTableAttributes: "alter table attributes", ActionAlterTablePartitionPlacement: "alter table partition placement", ActionAlterTablePartitionAttributes: "alter table partition attributes", @@ -202,9 +199,7 @@ func (h *HistoryInfo) AddTableInfo(schemaVer int64, tblInfo *TableInfo) { func (h *HistoryInfo) SetTableInfos(schemaVer int64, tblInfos []*TableInfo) { h.SchemaVersion = schemaVer h.MultipleTableInfos = make([]*TableInfo, len(tblInfos)) - for i, info := range tblInfos { - h.MultipleTableInfos[i] = info - } + copy(h.MultipleTableInfos, tblInfos) } // Clean cleans history information. @@ -312,8 +307,8 @@ func (sub *SubJob) IsFinished() bool { } // ToProxyJob converts a sub-job to a proxy job. -func (sub *SubJob) ToProxyJob(parentJob *Job) *Job { - return &Job{ +func (sub *SubJob) ToProxyJob(parentJob *Job) Job { + return Job{ ID: parentJob.ID, Type: sub.Type, SchemaID: parentJob.SchemaID, @@ -440,6 +435,28 @@ func (job *Job) MarkNonRevertible() { } } +// Clone returns a copy of the job. +func (job *Job) Clone() *Job { + encode, err := job.Encode(true) + if err != nil { + return nil + } + var clone Job + err = clone.Decode(encode) + if err != nil { + return nil + } + if len(job.Args) > 0 { + clone.Args = make([]interface{}, len(job.Args)) + copy(clone.Args, job.Args) + } + for i, sub := range job.MultiSchemaInfo.SubJobs { + clone.MultiSchemaInfo.SubJobs[i].Args = make([]interface{}, len(sub.Args)) + copy(clone.MultiSchemaInfo.SubJobs[i].Args, sub.Args) + } + return &clone +} + // TSConvert2Time converts timestamp to time. 
func TSConvert2Time(ts uint64) time.Time { t := int64(ts >> 18) // 18 is for the logical time. @@ -646,7 +663,7 @@ func (job *Job) MayNeedReorg() bool { // IsRollbackable checks whether the job can be rollback. func (job *Job) IsRollbackable() bool { switch job.Type { - case ActionDropIndex, ActionDropPrimaryKey, ActionDropIndexes: + case ActionDropIndex, ActionDropPrimaryKey: // We can't cancel if index current state is in StateDeleteOnly or StateDeleteReorganization or StateWriteOnly, otherwise there will be an inconsistent issue between record and index. // In WriteOnly state, we can rollback for normal index but can't rollback for expression index(need to drop hidden column). Since we can't // know the type of index here, we consider all indices except primary index as non-rollbackable. @@ -662,7 +679,7 @@ func (job *Job) IsRollbackable() bool { case ActionDropColumn, ActionDropSchema, ActionDropTable, ActionDropSequence, ActionDropForeignKey, ActionDropTablePartition: return job.SchemaState == StatePublic - case ActionDropColumns, ActionRebaseAutoID, ActionShardRowID, + case ActionRebaseAutoID, ActionShardRowID, ActionTruncateTable, ActionAddForeignKey, ActionRenameTable, ActionModifyTableCharsetAndCollate, ActionTruncateTablePartition, ActionModifySchemaCharsetAndCollate, ActionRepairTable, diff --git a/parser/model/model.go b/parser/model/model.go index 43e3e4bc5bcfb..808d5fd1f8496 100644 --- a/parser/model/model.go +++ b/parser/model/model.go @@ -1353,6 +1353,10 @@ func (db *DBInfo) Copy() *DBInfo { return &newInfo } +func LessDBInfo(a *DBInfo, b *DBInfo) bool { + return a.Name.L < b.Name.L +} + // CIStr is case insensitive string. type CIStr struct { O string `json:"O"` // Original string. @@ -1396,6 +1400,13 @@ type TableColumnID struct { ColumnID int64 } +// TableItemID is composed by table ID and column/index ID +type TableItemID struct { + TableID int64 + ID int64 + IsIndex bool +} + // PolicyRefInfo is the struct to refer the placement policy. 
type PolicyRefInfo struct { ID int64 `json:"id"` diff --git a/parser/model/model_test.go b/parser/model/model_test.go index 114b3a38bf5bf..480786ec14050 100644 --- a/parser/model/model_test.go +++ b/parser/model/model_test.go @@ -376,11 +376,8 @@ func TestString(t *testing.T) { {ActionAddIndex, "add index"}, {ActionDropIndex, "drop index"}, {ActionAddColumn, "add column"}, - {ActionAddColumns, "add multi-columns"}, {ActionDropColumn, "drop column"}, - {ActionDropColumns, "drop multi-columns"}, {ActionModifySchemaCharsetAndCollate, "modify schema charset and collate"}, - {ActionDropIndexes, "drop multi-indexes"}, {ActionAlterTablePlacement, "alter table placement"}, {ActionAlterTablePartitionPlacement, "alter table partition placement"}, {ActionAlterNoCacheTable, "alter table nocache"}, diff --git a/parser/parser.go b/parser/parser.go index 051b5550596c1..9f9112dc76731 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -19881,12 +19881,13 @@ yynewstate: tp := types.NewFieldType(mysql.TypeEnum) elems := yyS[yypt-2].item.([]*ast.TextString) opt := yyS[yypt-0].item.(*ast.OptBinary) - tp.SetElems(ast.TransformTextStrings(elems, opt.Charset)) + tp.SetElems(make([]string, len(elems))) fieldLen := -1 // enum_flen = max(ele_flen) - for i := range tp.GetElems() { - tp.SetElem(i, strings.TrimRight(tp.GetElem(i), " ")) - if len(tp.GetElem(i)) > fieldLen { - fieldLen = len(tp.GetElem(i)) + for i, e := range elems { + trimmed := strings.TrimRight(e.Value, " ") + tp.SetElemWithIsBinaryLit(i, trimmed, e.IsBinaryLiteral) + if len(trimmed) > fieldLen { + fieldLen = len(trimmed) } } tp.SetFlen(fieldLen) @@ -19901,11 +19902,12 @@ yynewstate: tp := types.NewFieldType(mysql.TypeSet) elems := yyS[yypt-2].item.([]*ast.TextString) opt := yyS[yypt-0].item.(*ast.OptBinary) - tp.SetElems(ast.TransformTextStrings(elems, opt.Charset)) - fieldLen := len(tp.GetElems()) - 1 // set_flen = sum(ele_flen) + number_of_ele - 1 - for i := range tp.GetElems() { - tp.SetElem(i, strings.TrimRight(tp.GetElem(i), " ")) - fieldLen += len(tp.GetElem(i)) + tp.SetElems(make([]string, len(elems))) + fieldLen := len(elems) - 1 // set_flen = sum(ele_flen) + number_of_ele - 1 + for i, e := range elems { + trimmed := strings.TrimRight(e.Value, " ") + tp.SetElemWithIsBinaryLit(i, trimmed, e.IsBinaryLiteral) + fieldLen += len(trimmed) } tp.SetFlen(fieldLen) tp.SetCharset(opt.Charset) diff --git a/parser/parser.y b/parser/parser.y index 131fa55aaaebd..0cda00670a6eb 100644 --- a/parser/parser.y +++ b/parser/parser.y @@ -11896,12 +11896,13 @@ StringType: tp := types.NewFieldType(mysql.TypeEnum) elems := $3.([]*ast.TextString) opt := $5.(*ast.OptBinary) - tp.SetElems(ast.TransformTextStrings(elems, opt.Charset)) + tp.SetElems(make([]string, len(elems))) fieldLen := -1 // enum_flen = max(ele_flen) - for i := range tp.GetElems() { - tp.SetElem(i, strings.TrimRight(tp.GetElem(i), " ")) - if len(tp.GetElem(i)) > fieldLen { - fieldLen = len(tp.GetElem(i)) + for i, e := range elems { + trimmed := strings.TrimRight(e.Value, " ") + tp.SetElemWithIsBinaryLit(i, trimmed, e.IsBinaryLiteral) + if len(trimmed) > fieldLen { + fieldLen = len(trimmed) } } tp.SetFlen(fieldLen) @@ -11916,11 +11917,12 @@ StringType: tp := types.NewFieldType(mysql.TypeSet) elems := $3.([]*ast.TextString) opt := $5.(*ast.OptBinary) - tp.SetElems(ast.TransformTextStrings(elems, opt.Charset)) - fieldLen := len(tp.GetElems()) - 1 // set_flen = sum(ele_flen) + number_of_ele - 1 - for i := range tp.GetElems() { - tp.SetElem(i, strings.TrimRight(tp.GetElem(i), " ")) - fieldLen += 
len(tp.GetElem(i)) + tp.SetElems(make([]string, len(elems))) + fieldLen := len(elems) - 1 // set_flen = sum(ele_flen) + number_of_ele - 1 + for i, e := range elems { + trimmed := strings.TrimRight(e.Value, " ") + tp.SetElemWithIsBinaryLit(i, trimmed, e.IsBinaryLiteral) + fieldLen += len(trimmed) } tp.SetFlen(fieldLen) tp.SetCharset(opt.Charset) diff --git a/parser/parser_test.go b/parser/parser_test.go index 3ab44e1a232c0..ee5c9fc7e384c 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -2177,7 +2177,7 @@ func TestIdentifier(t *testing.T) { {`select COUNT from DESC`, false, ""}, {`select COUNT from SELECT.DESC`, true, "SELECT `COUNT` FROM `SELECT`.`DESC`"}, {"use `select`", true, "USE `select`"}, - {"use `sel``ect`", true, "USE `sel``ect`"}, + {"use `sel``ect`", true, "USE `sel``ect`"}, //nolint: misspell {"use select", false, "USE `select`"}, {`select * from t as a`, true, "SELECT * FROM `t` AS `a`"}, {"select 1 full, 1 row, 1 abs", false, ""}, @@ -6192,6 +6192,8 @@ func (checker *nodeTextCleaner) Enter(in ast.Node) (out ast.Node, skipChildren b node.Specs = specs case *ast.Join: node.ExplicitParens = false + case *ast.ColumnDef: + node.Tp.CleanElemIsBinaryLit() } return in, false } diff --git a/parser/types/etc.go b/parser/types/etc.go index 2fe3d113e8820..1fdfeaf05367f 100644 --- a/parser/types/etc.go +++ b/parser/types/etc.go @@ -128,12 +128,8 @@ func TypeToStr(tp byte, cs string) (r string) { // Args: // ts: type string func StrToType(ts string) (tp byte) { - if strings.Contains(ts, "blob") { - ts = strings.Replace(ts, "blob", "text", 1) - } else if strings.Contains(ts, "binary") { - ts = strings.Replace(ts, "binary", "char", 1) - } - + ts = strings.Replace(ts, "blob", "text", 1) + ts = strings.Replace(ts, "binary", "char", 1) if tp, ok := str2Type[ts]; ok { return tp } diff --git a/parser/types/field_type.go b/parser/types/field_type.go index aa984e2a945b6..429896d6d790e 100644 --- a/parser/types/field_type.go +++ b/parser/types/field_type.go @@ -53,7 +53,9 @@ type FieldType struct { // collate represent collate rules of the charset collate string // elems is the element list for enum and set type. - elems []string + elems []string + elemsIsBinaryLit []bool + // Please keep in mind that jsonFieldType should be updated if you add a new field here. } // NewFieldType returns a FieldType, @@ -180,10 +182,34 @@ func (ft *FieldType) SetElem(idx int, element string) { ft.elems[idx] = element } +func (ft *FieldType) SetElemWithIsBinaryLit(idx int, element string, isBinaryLit bool) { + ft.elems[idx] = element + if isBinaryLit { + // Create the binary literal flags lazily. + if ft.elemsIsBinaryLit == nil { + ft.elemsIsBinaryLit = make([]bool, len(ft.elems)) + } + ft.elemsIsBinaryLit[idx] = true + } +} + func (ft *FieldType) GetElem(idx int) string { return ft.elems[idx] } +func (ft *FieldType) GetElemIsBinaryLit(idx int) bool { + if len(ft.elemsIsBinaryLit) == 0 { + return false + } + return ft.elemsIsBinaryLit[idx] +} + +func (ft *FieldType) CleanElemIsBinaryLit() { + if ft != nil && ft.elemsIsBinaryLit != nil { + ft.elemsIsBinaryLit = nil + } +} + // Clone returns a copy of itself. 
func (ft *FieldType) Clone() *FieldType { ret := *ft @@ -506,13 +532,14 @@ func HasCharset(ft *FieldType) bool { // for json type jsonFieldType struct { - Tp byte - Flag uint - Flen int - Decimal int - Charset string - Collate string - Elems []string + Tp byte + Flag uint + Flen int + Decimal int + Charset string + Collate string + Elems []string + ElemsIsBinaryLit []bool } func (ft *FieldType) UnmarshalJSON(data []byte) error { @@ -526,6 +553,7 @@ func (ft *FieldType) UnmarshalJSON(data []byte) error { ft.charset = r.Charset ft.collate = r.Collate ft.elems = r.Elems + ft.elemsIsBinaryLit = r.ElemsIsBinaryLit } return err } @@ -539,5 +567,6 @@ func (ft *FieldType) MarshalJSON() ([]byte, error) { r.Charset = ft.charset r.Collate = ft.collate r.Elems = ft.elems + r.ElemsIsBinaryLit = ft.elemsIsBinaryLit return json.Marshal(r) } diff --git a/parser/types/field_type_test.go b/parser/types/field_type_test.go index e258e75787313..6310dbb102bc9 100644 --- a/parser/types/field_type_test.go +++ b/parser/types/field_type_test.go @@ -21,11 +21,10 @@ import ( "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/mysql" - . "github.com/pingcap/tidb/parser/types" - "github.com/stretchr/testify/require" - // import parser_driver _ "github.com/pingcap/tidb/parser/test_driver" + . "github.com/pingcap/tidb/parser/types" + "github.com/stretchr/testify/require" ) func TestFieldType(t *testing.T) { diff --git a/planner/core/BUILD.bazel b/planner/core/BUILD.bazel index 7c7186892db2d..83342809499ce 100644 --- a/planner/core/BUILD.bazel +++ b/planner/core/BUILD.bazel @@ -133,7 +133,6 @@ go_library( "@com_github_pingcap_kvproto//pkg/coprocessor", "@com_github_pingcap_tipb//go-tipb", "@com_github_tikv_client_go_v2//kv", - "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", @@ -143,6 +142,7 @@ go_library( go_test( name = "core_test", + timeout = "short", srcs = [ "cache_test.go", "cacheable_checker_test.go", @@ -243,6 +243,7 @@ go_test( "@com_github_prometheus_client_golang//prometheus", "@com_github_prometheus_client_model//go", "@com_github_stretchr_testify//require", + "@org_golang_x_exp//slices", "@org_uber_go_goleak//:goleak", ], ) diff --git a/planner/core/access_object.go b/planner/core/access_object.go new file mode 100644 index 0000000000000..b5aa4af1fd692 --- /dev/null +++ b/planner/core/access_object.go @@ -0,0 +1,485 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tipb/go-tipb" +) + +// A plan is dataAccesser means it can access underlying data. +// Include `PhysicalTableScan`, `PhysicalIndexScan`, `PointGetPlan`, `BatchPointScan` and `PhysicalMemTable`. 
+// ExplainInfo = AccessObject + OperatorInfo +type dataAccesser interface { + + // AccessObject return plan's `table`, `partition` and `index`. + AccessObject() AccessObject + + // OperatorInfo return other operator information to be explained. + OperatorInfo(normalized bool) string +} + +type partitionAccesser interface { + accessObject(sessionctx.Context) AccessObject +} + +// AccessObject represents what is accessed by an operator. +// It corresponds to the "access object" column in an EXPLAIN statement result. +type AccessObject interface { + String() string + NormalizedString() string + // SetIntoPB transform itself into a protobuf message and set into the binary plan. + SetIntoPB(*tipb.ExplainOperator) +} + +// DynamicPartitionAccessObject represents the partitions accessed by the children of this operator. +// It's mainly used in dynamic pruning mode. +type DynamicPartitionAccessObject struct { + Database string + Table string + AllPartitions bool + Partitions []string + err string +} + +func (d *DynamicPartitionAccessObject) String() string { + if len(d.err) > 0 { + return d.err + } + if d.AllPartitions { + return "partition:all" + } else if len(d.Partitions) == 0 { + return "partition:dual" + } + return "partition:" + strings.Join(d.Partitions, ",") +} + +// DynamicPartitionAccessObjects is a list of DynamicPartitionAccessObject. +type DynamicPartitionAccessObjects []*DynamicPartitionAccessObject + +func (d DynamicPartitionAccessObjects) String() string { + if len(d) == 0 { + return "" + } + if len(d) == 1 { + return d[0].String() + } + var b strings.Builder + for i, access := range d { + if i != 0 { + b.WriteString(", ") + } + b.WriteString(access.String()) + b.WriteString(" of " + access.Table) + } + return b.String() +} + +// NormalizedString implements AccessObject. +func (d DynamicPartitionAccessObjects) NormalizedString() string { + return d.String() +} + +// SetIntoPB implements AccessObject. +func (d DynamicPartitionAccessObjects) SetIntoPB(pb *tipb.ExplainOperator) { + if len(d) == 0 || pb == nil { + return + } + pbObjSlice := make([]tipb.DynamicPartitionAccessObject, len(d)) + for i, obj := range d { + if len(obj.err) > 0 { + continue + } + pbObj := pbObjSlice[i] + pbObj.Database = obj.Database + pbObj.Table = obj.Table + pbObj.AllPartitions = obj.AllPartitions + pbObj.Partitions = obj.Partitions + } + pbObjs := tipb.DynamicPartitionAccessObjects{Objects: make([]*tipb.DynamicPartitionAccessObject, 0, len(d))} + for i := range pbObjSlice { + pbObjs.Objects = append(pbObjs.Objects, &pbObjSlice[i]) + } + pb.AccessObject = &tipb.ExplainOperator_DynamicPartitionObjects{DynamicPartitionObjects: &pbObjs} +} + +// IndexAccess represents the index accessed by an operator. +type IndexAccess struct { + Name string + Cols []string + IsClusteredIndex bool +} + +// ToPB turns itself into a protobuf message. +func (a *IndexAccess) ToPB() *tipb.IndexAccess { + if a == nil { + return nil + } + return &tipb.IndexAccess{ + Name: a.Name, + Cols: a.Cols, + IsClusteredIndex: a.IsClusteredIndex, + } +} + +// ScanAccessObject represents the access to a table. +// It may also represent the access to indexes and partitions of a table. +type ScanAccessObject struct { + Database string + Table string + Indexes []IndexAccess + Partitions []string +} + +// NormalizedString implements AccessObject. 
+func (s *ScanAccessObject) NormalizedString() string { + var b strings.Builder + if len(s.Table) > 0 { + b.WriteString("table:" + s.Table) + } + if len(s.Partitions) > 0 { + b.WriteString(", partition:?") + } + for _, index := range s.Indexes { + if index.IsClusteredIndex { + b.WriteString(", clustered index:") + } else { + b.WriteString(", index:") + } + b.WriteString(index.Name + "(" + strings.Join(index.Cols, ", ") + ")") + } + return b.String() +} + +func (s *ScanAccessObject) String() string { + var b strings.Builder + if len(s.Table) > 0 { + b.WriteString("table:" + s.Table) + } + if len(s.Partitions) > 0 { + b.WriteString(", partition:" + strings.Join(s.Partitions, ",")) + } + for _, index := range s.Indexes { + if index.IsClusteredIndex { + b.WriteString(", clustered index:") + } else { + b.WriteString(", index:") + } + b.WriteString(index.Name + "(" + strings.Join(index.Cols, ", ") + ")") + } + return b.String() +} + +// SetIntoPB implements AccessObject. +func (s *ScanAccessObject) SetIntoPB(pb *tipb.ExplainOperator) { + if s == nil || pb == nil { + return + } + pbObj := tipb.ScanAccessObject{ + Database: s.Database, + Table: s.Table, + Partitions: s.Partitions, + } + for i := range s.Indexes { + pbObj.Indexes = append(pbObj.Indexes, s.Indexes[i].ToPB()) + } + pb.AccessObject = &tipb.ExplainOperator_ScanObject{ScanObject: &pbObj} +} + +// OtherAccessObject represents other kinds of access. +type OtherAccessObject string + +func (o OtherAccessObject) String() string { + return string(o) +} + +// NormalizedString implements AccessObject. +func (o OtherAccessObject) NormalizedString() string { + return o.String() +} + +// SetIntoPB implements AccessObject. +func (o OtherAccessObject) SetIntoPB(pb *tipb.ExplainOperator) { + if pb == nil { + return + } + pb.AccessObject = &tipb.ExplainOperator_OtherObject{OtherObject: string(o)} +} + +// AccessObject implements dataAccesser interface. +func (p *PhysicalIndexScan) AccessObject() AccessObject { + res := &ScanAccessObject{ + Database: p.DBName.O, + } + tblName := p.Table.Name.O + if p.TableAsName != nil && p.TableAsName.O != "" { + tblName = p.TableAsName.O + } + res.Table = tblName + if p.isPartition { + pi := p.Table.GetPartitionInfo() + if pi != nil { + partitionName := pi.GetNameByID(p.physicalTableID) + res.Partitions = []string{partitionName} + } + } + if len(p.Index.Columns) > 0 { + index := IndexAccess{ + Name: p.Index.Name.O, + } + for _, idxCol := range p.Index.Columns { + if tblCol := p.Table.Columns[idxCol.Offset]; tblCol.Hidden { + index.Cols = append(index.Cols, tblCol.GeneratedExprString) + } else { + index.Cols = append(index.Cols, idxCol.Name.O) + } + } + res.Indexes = []IndexAccess{index} + } + return res +} + +// AccessObject implements dataAccesser interface. +func (p *PhysicalTableScan) AccessObject() AccessObject { + res := &ScanAccessObject{ + Database: p.DBName.O, + } + tblName := p.Table.Name.O + if p.TableAsName != nil && p.TableAsName.O != "" { + tblName = p.TableAsName.O + } + res.Table = tblName + if p.isPartition { + pi := p.Table.GetPartitionInfo() + if pi != nil { + partitionName := pi.GetNameByID(p.physicalTableID) + res.Partitions = []string{partitionName} + } + } + return res +} + +// AccessObject implements dataAccesser interface. +func (p *PhysicalMemTable) AccessObject() AccessObject { + return &ScanAccessObject{ + Database: p.DBName.O, + Table: p.Table.Name.O, + } +} + +// AccessObject implements dataAccesser interface. 
+func (p *PointGetPlan) AccessObject() AccessObject { + res := &ScanAccessObject{ + Database: p.dbName, + Table: p.TblInfo.Name.O, + } + if p.PartitionInfo != nil { + res.Partitions = []string{p.PartitionInfo.Name.O} + } + if p.IndexInfo != nil { + index := IndexAccess{ + Name: p.IndexInfo.Name.O, + IsClusteredIndex: p.IndexInfo.Primary && p.TblInfo.IsCommonHandle, + } + for _, idxCol := range p.IndexInfo.Columns { + if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden { + index.Cols = append(index.Cols, tblCol.GeneratedExprString) + } else { + index.Cols = append(index.Cols, idxCol.Name.O) + } + } + res.Indexes = []IndexAccess{index} + } + return res +} + +// AccessObject implements physicalScan interface. +func (p *BatchPointGetPlan) AccessObject() AccessObject { + res := &ScanAccessObject{ + Database: p.dbName, + Table: p.TblInfo.Name.O, + } + for _, partitionInfo := range p.PartitionInfos { + res.Partitions = append(res.Partitions, partitionInfo.Name.O) + } + if p.IndexInfo != nil { + index := IndexAccess{ + Name: p.IndexInfo.Name.O, + IsClusteredIndex: p.IndexInfo.Primary && p.TblInfo.IsCommonHandle, + } + for _, idxCol := range p.IndexInfo.Columns { + if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden { + index.Cols = append(index.Cols, tblCol.GeneratedExprString) + } else { + index.Cols = append(index.Cols, idxCol.Name.O) + } + } + res.Indexes = []IndexAccess{index} + } + return res +} + +func getDynamicAccessPartition(sctx sessionctx.Context, tblInfo *model.TableInfo, partitionInfo *PartitionInfo, asName string) (res *DynamicPartitionAccessObject) { + pi := tblInfo.GetPartitionInfo() + if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() { + return nil + } + + res = &DynamicPartitionAccessObject{} + tblName := tblInfo.Name.O + if len(asName) > 0 { + tblName = asName + } + res.Table = tblName + is := sctx.GetInfoSchema().(infoschema.InfoSchema) + db, ok := is.SchemaByTable(tblInfo) + if ok { + res.Database = db.Name.O + } + tmp, ok := is.TableByID(tblInfo.ID) + if !ok { + res.err = "partition table not found:" + strconv.FormatInt(tblInfo.ID, 10) + return res + } + tbl := tmp.(table.PartitionedTable) + + idxArr, err := PartitionPruning(sctx, tbl, partitionInfo.PruningConds, partitionInfo.PartitionNames, partitionInfo.Columns, partitionInfo.ColumnNames) + if err != nil { + res.err = "partition pruning error:" + err.Error() + return res + } + + if len(idxArr) == 1 && idxArr[0] == FullRange { + res.AllPartitions = true + return res + } + + for _, idx := range idxArr { + res.Partitions = append(res.Partitions, pi.Definitions[idx].Name.O) + } + return res +} + +func (p *PhysicalTableReader) accessObject(sctx sessionctx.Context) AccessObject { + if !sctx.GetSessionVars().UseDynamicPartitionPrune() { + return DynamicPartitionAccessObjects(nil) + } + if len(p.PartitionInfos) == 0 { + ts := p.TablePlans[0].(*PhysicalTableScan) + asName := "" + if ts.TableAsName != nil && len(ts.TableAsName.O) > 0 { + asName = ts.TableAsName.O + } + res := getDynamicAccessPartition(sctx, ts.Table, &p.PartitionInfo, asName) + if res == nil { + return DynamicPartitionAccessObjects(nil) + } + return DynamicPartitionAccessObjects{res} + } + if len(p.PartitionInfos) == 1 { + ts := p.PartitionInfos[0].tableScan + partInfo := p.PartitionInfos[0].partitionInfo + asName := "" + if ts.TableAsName != nil && len(ts.TableAsName.O) > 0 { + asName = ts.TableAsName.O + } + res := getDynamicAccessPartition(sctx, ts.Table, &partInfo, asName) + if res == nil { + return 
DynamicPartitionAccessObjects(nil) + } + return DynamicPartitionAccessObjects{res} + } + + res := make(DynamicPartitionAccessObjects, 0) + for _, info := range p.PartitionInfos { + if info.tableScan.Table.GetPartitionInfo() == nil { + continue + } + ts := info.tableScan + partInfo := info.partitionInfo + asName := "" + if ts.TableAsName != nil && len(ts.TableAsName.O) > 0 { + asName = ts.TableAsName.O + } + accessObj := getDynamicAccessPartition(sctx, ts.Table, &partInfo, asName) + if accessObj != nil { + res = append(res, accessObj) + } + } + if len(res) == 0 { + return DynamicPartitionAccessObjects(nil) + } + return res +} + +func (p *PhysicalIndexReader) accessObject(sctx sessionctx.Context) AccessObject { + if !sctx.GetSessionVars().UseDynamicPartitionPrune() { + return DynamicPartitionAccessObjects(nil) + } + is := p.IndexPlans[0].(*PhysicalIndexScan) + asName := "" + if is.TableAsName != nil && len(is.TableAsName.O) > 0 { + asName = is.TableAsName.O + } + res := getDynamicAccessPartition(sctx, is.Table, &p.PartitionInfo, asName) + if res == nil { + return DynamicPartitionAccessObjects(nil) + } + return DynamicPartitionAccessObjects{res} +} + +func (p *PhysicalIndexLookUpReader) accessObject(sctx sessionctx.Context) AccessObject { + if !sctx.GetSessionVars().UseDynamicPartitionPrune() { + return DynamicPartitionAccessObjects(nil) + } + ts := p.TablePlans[0].(*PhysicalTableScan) + asName := "" + if ts.TableAsName != nil && len(ts.TableAsName.O) > 0 { + asName = ts.TableAsName.O + } + res := getDynamicAccessPartition(sctx, ts.Table, &p.PartitionInfo, asName) + if res == nil { + return DynamicPartitionAccessObjects(nil) + } + return DynamicPartitionAccessObjects{res} +} + +func (p *PhysicalIndexMergeReader) accessObject(sctx sessionctx.Context) AccessObject { + if !sctx.GetSessionVars().UseDynamicPartitionPrune() { + return DynamicPartitionAccessObjects(nil) + } + ts := p.TablePlans[0].(*PhysicalTableScan) + asName := "" + if ts.TableAsName != nil && len(ts.TableAsName.O) > 0 { + asName = ts.TableAsName.O + } + res := getDynamicAccessPartition(sctx, ts.Table, &p.PartitionInfo, asName) + if res == nil { + return DynamicPartitionAccessObjects(nil) + } + return DynamicPartitionAccessObjects{res} +} + +// AccessObject implements physicalScan interface. +func (p *PhysicalCTE) AccessObject() AccessObject { + return OtherAccessObject(fmt.Sprintf("CTE:%s", p.cteAsName.L)) +} diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index a135eadf959a0..2aeeba0e92072 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -822,9 +822,6 @@ func TestLimitIndexEstimation(t *testing.T) { SQL string Plan []string } - // When paging is used, there is a 'paging:true' makes the explain output differ. 
- // IndexLookUp 0.00 root paging:true - tk.MustExec("set @@tidb_enable_paging = off") analyzeSuiteData := core.GetAnalyzeSuiteData() analyzeSuiteData.GetTestCases(t, &input, &output) diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index eeab80bcf2640..6495d318dc265 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -22,9 +22,7 @@ import ( "strings" "github.com/pingcap/errors" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/bindinfo" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -38,7 +36,6 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" @@ -52,7 +49,6 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/texttree" - "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" ) @@ -194,15 +190,9 @@ type Execute struct { TxtProtoVars []expression.Expression // parsed variables under text protocol BinProtoVars []types.Datum // parsed variables under binary protocol ExecID uint32 - // Deprecated: SnapshotTS now is only used for asserting after refactoring stale read, it will be removed later. - SnapshotTS uint64 - // Deprecated: IsStaleness now is only used for asserting after refactoring stale read, it will be removed later. - IsStaleness bool - // Deprecated: ReadReplicaScope now is only used for asserting after refactoring stale read, it will be removed later. - ReadReplicaScope string - Stmt ast.StmtNode - StmtType string - Plan Plan + Stmt ast.StmtNode + StmtType string + Plan Plan } // Check if result of GetVar expr is BinaryLiteral @@ -274,29 +264,6 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont } } - // Just setting `e.SnapshotTS`, `e.ReadReplicaScope` and `e.IsStaleness` with the return value of `handleExecuteBuilderOption` - // for asserting the stale read context after refactoring is exactly the same with the previous logic. - snapshotTS, readReplicaScope, isStaleness, err := handleExecuteBuilderOption(sctx, preparedObj) - if err != nil { - return err - } - e.SnapshotTS = snapshotTS - e.ReadReplicaScope = readReplicaScope - e.IsStaleness = isStaleness - - failpoint.Inject("assertStaleReadForOptimizePreparedPlan", func() { - staleread.AssertStmtStaleness(sctx, isStaleness) - if isStaleness { - is2, err := domain.GetDomain(sctx).GetSnapshotInfoSchema(snapshotTS) - if err != nil { - panic(err) - } - - if is.SchemaMetaVersion() != is2.SchemaMetaVersion() { - panic(fmt.Sprintf("%d != %d", is.SchemaMetaVersion(), is2.SchemaMetaVersion())) - } - } - }) if prepared.SchemaVersion != is.SchemaMetaVersion() { // In order to avoid some correctness issues, we have to clear the // cached plan once the schema version is changed. @@ -328,7 +295,7 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont prepared.CachedPlan = nil vars.LastUpdateTime4PC = expiredTimeStamp4PC } - err = e.getPhysicalPlan(ctx, sctx, is, preparedObj) + err := e.getPhysicalPlan(ctx, sctx, is, preparedObj) if err != nil { return err } @@ -336,64 +303,6 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont return nil } -// Deprecated: it will be removed later. 
Now it is only used for asserting -func handleExecuteBuilderOption(sctx sessionctx.Context, - preparedObj *CachedPrepareStmt) (snapshotTS uint64, readReplicaScope string, isStaleness bool, err error) { - snapshotTS = 0 - readReplicaScope = oracle.GlobalTxnScope - isStaleness = false - err = nil - vars := sctx.GetSessionVars() - readTS := vars.TxnReadTS.PeakTxnReadTS() - if readTS > 0 { - // It means we meet following case: - // 1. prepare p from 'select * from t as of timestamp now() - x seconds' - // 1. set transaction read only as of timestamp ts2 - // 2. execute prepare p - // The execute statement would be refused due to timestamp conflict - if preparedObj.SnapshotTSEvaluator != nil { - err = ErrAsOf.FastGenWithCause("as of timestamp can't be set after set transaction read only as of.") - return - } - snapshotTS = vars.TxnReadTS.UseTxnReadTS() - isStaleness = true - readReplicaScope = config.GetTxnScopeFromConfig() - return - } - // It means we meet following case: - // 1. prepare p from 'select * from t as of timestamp ts1' - // 1. begin - // 2. execute prepare p - // The execute statement would be refused due to timestamp conflict - if preparedObj.SnapshotTSEvaluator != nil { - if vars.InTxn() { - err = ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.") - return - } - // if preparedObj.SnapshotTSEvaluator != nil, it is a stale read SQL: - // which means its infoschema is specified by the SQL, not the current/latest infoschema - snapshotTS, err = preparedObj.SnapshotTSEvaluator(sctx) - if err != nil { - err = errors.Trace(err) - return - } - isStaleness = true - readReplicaScope = config.GetTxnScopeFromConfig() - return - } - // It means we meet following case: - // 1. prepare p from 'select * from t' - // 1. start transaction read only as of timestamp ts1 - // 2. 
execute prepare p - if vars.InTxn() && vars.TxnCtx.IsStaleness { - isStaleness = true - snapshotTS = vars.TxnCtx.StartTS - readReplicaScope = vars.TxnCtx.TxnScope - return - } - return -} - func (e *Execute) checkPreparedPriv(ctx context.Context, sctx sessionctx.Context, preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error { if pm := privilege.GetPrivilegeManager(sctx); pm != nil { @@ -1593,11 +1502,11 @@ func (e *Explain) getOperatorInfo(p Plan, id string) (string, string, string, st } var accessObject, operatorInfo string if plan, ok := p.(dataAccesser); ok { - accessObject = plan.AccessObject(false) + accessObject = plan.AccessObject().String() operatorInfo = plan.OperatorInfo(false) } else { if pa, ok := p.(partitionAccesser); ok && e.ctx != nil { - accessObject = pa.accessObject(e.ctx) + accessObject = pa.accessObject(e.ctx).String() } operatorInfo = p.ExplainInfo() } diff --git a/planner/core/explain.go b/planner/core/explain.go index 6d8512d5dd94a..e03bd43ceee2a 100644 --- a/planner/core/explain.go +++ b/planner/core/explain.go @@ -22,38 +22,18 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" - "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/planner/util" - "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics" - "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/stringutil" "github.com/pingcap/tipb/go-tipb" ) -// A plan is dataAccesser means it can access underlying data. -// Include `PhysicalTableScan`, `PhysicalIndexScan`, `PointGetPlan`, `BatchPointScan` and `PhysicalMemTable`. -// ExplainInfo = AccessObject + OperatorInfo -type dataAccesser interface { - - // AccessObject return plan's `table`, `partition` and `index`. - AccessObject(normalized bool) string - - // OperatorInfo return other operator information to be explained. - OperatorInfo(normalized bool) string -} - -type partitionAccesser interface { - accessObject(sessionctx.Context) string -} - // ExplainInfo implements Plan interface. func (p *PhysicalLock) ExplainInfo() string { var str strings.Builder @@ -83,47 +63,12 @@ func (p *PhysicalIndexScan) TP() string { // ExplainInfo implements Plan interface. func (p *PhysicalIndexScan) ExplainInfo() string { - return p.AccessObject(false) + ", " + p.OperatorInfo(false) + return p.AccessObject().String() + ", " + p.OperatorInfo(false) } // ExplainNormalizedInfo implements Plan interface. func (p *PhysicalIndexScan) ExplainNormalizedInfo() string { - return p.AccessObject(true) + ", " + p.OperatorInfo(true) -} - -// AccessObject implements dataAccesser interface. 
-func (p *PhysicalIndexScan) AccessObject(normalized bool) string { - var buffer strings.Builder - tblName := p.Table.Name.O - if p.TableAsName != nil && p.TableAsName.O != "" { - tblName = p.TableAsName.O - } - buffer.WriteString("table:") - buffer.WriteString(tblName) - if p.isPartition { - if normalized { - buffer.WriteString(", partition:?") - } else if pi := p.Table.GetPartitionInfo(); pi != nil { - partitionName := pi.GetNameByID(p.physicalTableID) - buffer.WriteString(", partition:") - buffer.WriteString(partitionName) - } - } - if len(p.Index.Columns) > 0 { - buffer.WriteString(", index:" + p.Index.Name.O + "(") - for i, idxCol := range p.Index.Columns { - if tblCol := p.Table.Columns[idxCol.Offset]; tblCol.Hidden { - buffer.WriteString(tblCol.GeneratedExprString) - } else { - buffer.WriteString(idxCol.Name.O) - } - if i+1 < len(p.Index.Columns) { - buffer.WriteString(", ") - } - } - buffer.WriteString(")") - } - return buffer.String() + return p.AccessObject().NormalizedString() + ", " + p.OperatorInfo(true) } // OperatorInfo implements dataAccesser interface. @@ -215,33 +160,12 @@ func (p *PhysicalTableScan) TP() string { // ExplainInfo implements Plan interface. func (p *PhysicalTableScan) ExplainInfo() string { - return p.AccessObject(false) + ", " + p.OperatorInfo(false) + return p.AccessObject().String() + ", " + p.OperatorInfo(false) } // ExplainNormalizedInfo implements Plan interface. func (p *PhysicalTableScan) ExplainNormalizedInfo() string { - return p.AccessObject(true) + ", " + p.OperatorInfo(true) -} - -// AccessObject implements dataAccesser interface. -func (p *PhysicalTableScan) AccessObject(normalized bool) string { - var buffer strings.Builder - tblName := p.Table.Name.O - if p.TableAsName != nil && p.TableAsName.O != "" { - tblName = p.TableAsName.O - } - buffer.WriteString("table:") - buffer.WriteString(tblName) - if p.isPartition { - if normalized { - buffer.WriteString(", partition:?") - } else if pi := p.Table.GetPartitionInfo(); pi != nil { - partitionName := pi.GetNameByID(p.physicalTableID) - buffer.WriteString(", partition:") - buffer.WriteString(partitionName) - } - } - return buffer.String() + return p.AccessObject().NormalizedString() + ", " + p.OperatorInfo(true) } // OperatorInfo implements dataAccesser interface. 
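The string-building AccessObject implementations removed above are superseded by the structured access objects introduced earlier in this patch. Judging only from the call sites visible in this diff (p.AccessObject().String(), p.AccessObject().NormalizedString(), pa.accessObject(e.ctx).String()), the replacement interfaces look roughly like the sketch below; the real declarations live elsewhere in the change, so treat this as an illustration rather than the patch itself.

type dataAccesser interface {
	// AccessObject returns a structured description of the plan's table, partitions and indexes;
	// callers render it with String() or NormalizedString(), as in explain.go and common_plans.go above.
	AccessObject() AccessObject

	// OperatorInfo returns the remaining operator information to be explained; its signature is unchanged.
	OperatorInfo(normalized bool) string
}

type partitionAccesser interface {
	accessObject(sessionctx.Context) AccessObject
}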
@@ -333,93 +257,6 @@ func (p *PhysicalTableReader) ExplainNormalizedInfo() string { return "" } -func getAccessObjectForTableScan(sctx sessionctx.Context, ts *PhysicalTableScan, partitionInfo PartitionInfo) string { - pi := ts.Table.GetPartitionInfo() - if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() { - return "" - } - - is := sctx.GetInfoSchema().(infoschema.InfoSchema) - tmp, ok := is.TableByID(ts.Table.ID) - if !ok { - return "partition table not found" + strconv.FormatInt(ts.Table.ID, 10) - } - tbl := tmp.(table.PartitionedTable) - - return partitionAccessObject(sctx, tbl, pi, &partitionInfo) -} - -func (p *PhysicalTableReader) accessObject(sctx sessionctx.Context) string { - if !sctx.GetSessionVars().UseDynamicPartitionPrune() { - return "" - } - if len(p.PartitionInfos) == 0 { - ts := p.TablePlans[0].(*PhysicalTableScan) - return getAccessObjectForTableScan(sctx, ts, p.PartitionInfo) - } - if len(p.PartitionInfos) == 1 { - return getAccessObjectForTableScan(sctx, p.PartitionInfos[0].tableScan, p.PartitionInfos[0].partitionInfo) - } - containsPartitionTable := false - for _, info := range p.PartitionInfos { - if info.tableScan.Table.GetPartitionInfo() != nil { - containsPartitionTable = true - break - } - } - if !containsPartitionTable { - return "" - } - var buffer bytes.Buffer - for index, info := range p.PartitionInfos { - if index > 0 { - buffer.WriteString(", ") - } - - tblName := info.tableScan.Table.Name.O - if info.tableScan.TableAsName != nil && info.tableScan.TableAsName.O != "" { - tblName = info.tableScan.TableAsName.O - } - - if info.tableScan.Table.GetPartitionInfo() == nil { - buffer.WriteString("table of ") - buffer.WriteString(tblName) - continue - } - buffer.WriteString(getAccessObjectForTableScan(sctx, info.tableScan, info.partitionInfo)) - buffer.WriteString(" of ") - buffer.WriteString(tblName) - } - return buffer.String() -} - -func partitionAccessObject(sctx sessionctx.Context, tbl table.PartitionedTable, pi *model.PartitionInfo, partTable *PartitionInfo) string { - var buffer bytes.Buffer - idxArr, err := PartitionPruning(sctx, tbl, partTable.PruningConds, partTable.PartitionNames, partTable.Columns, partTable.ColumnNames) - if err != nil { - return "partition pruning error" + err.Error() - } - - if len(idxArr) == 0 { - return "partition:dual" - } - - if len(idxArr) == 1 && idxArr[0] == FullRange { - return "partition:all" - } - - for i, idx := range idxArr { - if i == 0 { - buffer.WriteString("partition:") - } else { - buffer.WriteString(",") - } - buffer.WriteString(pi.Definitions[idx].Name.O) - } - - return buffer.String() -} - // OperatorInfo return other operator information to be explained. 
func (p *PhysicalTableReader) OperatorInfo(normalized bool) string { return "data:" + p.tablePlan.ExplainID().String() @@ -435,26 +272,6 @@ func (p *PhysicalIndexReader) ExplainNormalizedInfo() string { return "index:" + p.indexPlan.TP() } -func (p *PhysicalIndexReader) accessObject(sctx sessionctx.Context) string { - ts := p.IndexPlans[0].(*PhysicalIndexScan) - pi := ts.Table.GetPartitionInfo() - if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() { - return "" - } - - var buffer strings.Builder - is := sctx.GetInfoSchema().(infoschema.InfoSchema) - tmp, ok := is.TableByID(ts.Table.ID) - if !ok { - buffer.WriteString("partition table not found: ") - buffer.WriteString(strconv.FormatInt(ts.Table.ID, 10)) - return buffer.String() - } - - tbl := tmp.(table.PartitionedTable) - return partitionAccessObject(sctx, tbl, pi, &p.PartitionInfo) -} - // ExplainInfo implements Plan interface. func (p *PhysicalIndexLookUpReader) ExplainInfo() string { var str strings.Builder @@ -469,48 +286,11 @@ func (p *PhysicalIndexLookUpReader) ExplainInfo() string { return str.String() } -func (p *PhysicalIndexLookUpReader) accessObject(sctx sessionctx.Context) string { - ts := p.TablePlans[0].(*PhysicalTableScan) - pi := ts.Table.GetPartitionInfo() - if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() { - return "" - } - - is := sctx.GetInfoSchema().(infoschema.InfoSchema) - tmp, ok := is.TableByID(ts.Table.ID) - if !ok { - var buffer strings.Builder - buffer.WriteString("partition table not found: ") - buffer.WriteString(strconv.FormatInt(ts.Table.ID, 10)) - return buffer.String() - } - - tbl := tmp.(table.PartitionedTable) - return partitionAccessObject(sctx, tbl, pi, &p.PartitionInfo) -} - // ExplainInfo implements Plan interface. func (p *PhysicalIndexMergeReader) ExplainInfo() string { return "" } -func (p *PhysicalIndexMergeReader) accessObject(sctx sessionctx.Context) string { - ts := p.TablePlans[0].(*PhysicalTableScan) - pi := ts.Table.GetPartitionInfo() - if pi == nil || !sctx.GetSessionVars().UseDynamicPartitionPrune() { - return "" - } - - is := sctx.GetInfoSchema().(infoschema.InfoSchema) - tmp, ok := is.TableByID(ts.Table.ID) - if !ok { - return "partition table not found" + strconv.FormatInt(ts.Table.ID, 10) - } - tbl := tmp.(table.PartitionedTable) - - return partitionAccessObject(sctx, tbl, pi, &p.PartitionInfo) -} - // ExplainInfo implements Plan interface. func (p *PhysicalUnionScan) ExplainInfo() string { return string(expression.SortedExplainExpressionList(p.Conditions)) @@ -518,7 +298,11 @@ func (p *PhysicalUnionScan) ExplainInfo() string { // ExplainInfo implements Plan interface. func (p *PhysicalSelection) ExplainInfo() string { - return string(expression.SortedExplainExpressionList(p.Conditions)) + exprStr := string(expression.SortedExplainExpressionList(p.Conditions)) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + exprStr += fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return exprStr } // ExplainNormalizedInfo implements Plan interface. @@ -528,7 +312,11 @@ func (p *PhysicalSelection) ExplainNormalizedInfo() string { // ExplainInfo implements Plan interface. 
func (p *PhysicalProjection) ExplainInfo() string { - return expression.ExplainExpressionList(p.Exprs, p.schema) + exprStr := expression.ExplainExpressionList(p.Exprs, p.schema) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + exprStr += fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return exprStr } // ExplainNormalizedInfo implements Plan interface. @@ -547,7 +335,11 @@ func (p *PhysicalTableDual) ExplainInfo() string { // ExplainInfo implements Plan interface. func (p *PhysicalSort) ExplainInfo() string { buffer := bytes.NewBufferString("") - return explainByItems(buffer, p.ByItems).String() + buffer = explainByItems(buffer, p.ByItems) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } + return buffer.String() } // ExplainInfo implements Plan interface. @@ -867,6 +659,9 @@ func (p *PhysicalWindow) ExplainInfo() string { p.formatFrameBound(buffer, p.Frame.End) } buffer.WriteString(")") + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } return buffer.String() } @@ -995,9 +790,20 @@ func (p *PhysicalExchangeSender) ExplainInfo() string { } fmt.Fprintf(buffer, "]") } + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } return buffer.String() } +// ExplainInfo implements Plan interface. +func (p *PhysicalExchangeReceiver) ExplainInfo() (res string) { + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + res = fmt.Sprintf("stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return res +} + // ExplainInfo implements Plan interface. func (p *LogicalUnionScan) ExplainInfo() string { buffer := bytes.NewBufferString("") @@ -1110,18 +916,13 @@ const MetricTableTimeFormat = "2006-01-02 15:04:05.999" // ExplainInfo implements Plan interface. func (p *PhysicalMemTable) ExplainInfo() string { - accessObject, operatorInfo := p.AccessObject(false), p.OperatorInfo(false) + accessObject, operatorInfo := p.AccessObject().String(), p.OperatorInfo(false) if len(operatorInfo) == 0 { return accessObject } return accessObject + ", " + operatorInfo } -// AccessObject implements dataAccesser interface. -func (p *PhysicalMemTable) AccessObject(_ bool) string { - return "table:" + p.Table.Name.O -} - // OperatorInfo implements dataAccesser interface. func (p *PhysicalMemTable) OperatorInfo(_ bool) string { if p.Extractor != nil { diff --git a/planner/core/fragment_test.go b/planner/core/fragment_test.go index fa8ec9e99763c..ee01ddf8133c4 100644 --- a/planner/core/fragment_test.go +++ b/planner/core/fragment_test.go @@ -15,10 +15,10 @@ package core import ( + "testing" + "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/require" - - "testing" ) func TestFragmentInitSingleton(t *testing.T) { diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 9fe9a7192cdcb..63e9d118507a3 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -17,6 +17,7 @@ package core_test import ( "bytes" "fmt" + "regexp" "strconv" "strings" "testing" @@ -177,9 +178,6 @@ func TestPushLimitDownIndexLookUpReader(t *testing.T) { tk.MustExec("insert into tbl values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5)") tk.MustExec("analyze table tbl") - // When paging is enabled, there would be a 'paging: true' in the explain result. 
- tk.MustExec("set @@tidb_enable_paging = off") - var input []string var output []struct { SQL string @@ -3685,9 +3683,6 @@ func TestExtendedStatsSwitch(t *testing.T) { "1.000000 1", )) - // When paging is enabled, there would be a 'paging: true' in the explain result. - tk.MustExec("set @@tidb_enable_paging = off") - // Estimated index scan count is 4 using extended stats. tk.MustQuery("explain format = 'brief' select * from t use index(b) where a > 3 order by b limit 1").Check(testkit.Rows( "Limit 1.00 root offset:0, count:1", @@ -4557,9 +4552,6 @@ func TestLimitIndexLookUpKeepOrder(t *testing.T) { tk.MustExec("drop table if exists t;") tk.MustExec("create table t(a int, b int, c int, d int, index idx(a,b,c));") - // When paging is enabled, there would be a 'paging: true' in the explain result. - tk.MustExec("set @@tidb_enable_paging = off") - var input []string var output []struct { SQL string @@ -4601,6 +4593,34 @@ func TestDecorrelateInnerJoinInSubquery(t *testing.T) { } } +func TestDecorrelateLimitInSubquery(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + + tk.MustExec("use test") + tk.MustExec("drop table if exists test") + tk.MustExec("create table test(id int, value int)") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(c int)") + tk.MustExec("insert t values(10), (8), (7), (9), (11)") + + var input []string + var output []struct { + SQL string + Plan []string + } + integrationSuiteData := core.GetIntegrationSuiteData() + integrationSuiteData.GetTestCases(t, &input, &output) + for i, tt := range input { + testdata.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } +} + func TestIndexMergeTableFilter(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() @@ -4758,9 +4778,6 @@ func TestMultiColMaxOneRow(t *testing.T) { tk.MustExec("create table t1(a int)") tk.MustExec("create table t2(a int, b int, c int, primary key(a,b))") - // When paging is enabled, there would be a 'paging: true' in the explain result. - tk.MustExec("set @@tidb_enable_paging = off") - var input []string var output []struct { SQL string @@ -5533,8 +5550,6 @@ func TestPreferRangeScanForUnsignedIntHandle(t *testing.T) { // Default RPC encoding may cause statistics explain result differ and then the test unstable. tk.MustExec("set @@tidb_enable_chunk_rpc = on") - // When paging is enabled, there would be a 'paging: true' in the explain result. - tk.MustExec("set @@tidb_enable_paging = off") var input []string var output []struct { @@ -5574,9 +5589,6 @@ func TestIssue27083(t *testing.T) { require.Nil(t, do.StatsHandle().DumpStatsDeltaToKV(handle.DumpAll)) tk.MustExec("analyze table t") - // When paging is enabled, there would be a 'paging: true' in the explain result. 
- tk.MustExec("set @@tidb_enable_paging = off") - var input []string var output []struct { SQL string @@ -6536,6 +6548,107 @@ func TestTiFlashPartitionTableScan(t *testing.T) { tk.MustExec("drop table hp_t;") } +func TestTiFlashFineGrainedShuffle(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") + tk.MustExec("set @@tidb_enforce_mpp = on") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int, c2 int)") + + tbl1, err := dom.InfoSchema().TableByName(model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + require.NoError(t, err) + // Set the hacked TiFlash replica for explain tests. + tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} + var input []string + var output []struct { + SQL string + Plan []string + } + integrationSuiteData := core.GetIntegrationSuiteData() + integrationSuiteData.GetTestCases(t, &input, &output) + for i, tt := range input { + testdata.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } +} + +func TestTiFlashFineGrainedShuffleWithMaxTiFlashThreads(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") + tk.MustExec("set @@tidb_enforce_mpp = on") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int, c2 int)") + tbl1, err := dom.InfoSchema().TableByName(model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + require.NoError(t, err) + // Set the hacked TiFlash replica for explain tests. + tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} + + sql := "explain select row_number() over w1 from t1 window w1 as (partition by c1);" + + getStreamCountFromExplain := func(rows [][]interface{}) (res []uint64) { + re := regexp.MustCompile("stream_count: ([0-9]+)") + for _, row := range rows { + buf := bytes.NewBufferString("") + _, _ = fmt.Fprintf(buf, "%s\n", row) + if matched := re.FindStringSubmatch(buf.String()); matched != nil { + require.Equal(t, len(matched), 2) + c, err := strconv.ParseUint(matched[1], 10, 64) + require.NoError(t, err) + res = append(res, c) + } + } + return res + } + + // tiflash_fine_grained_shuffle_stream_count should be same with tidb_max_tiflash_threads. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows := tk.MustQuery(sql).Rows() + streamCount := getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(10), streamCount[0]) + + // tiflash_fine_grained_shuffle_stream_count should be default value when tidb_max_tiflash_threads is -1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = -1") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(variable.DefStreamCountWhenMaxThreadsNotSet), streamCount[0]) + + // tiflash_fine_grained_shuffle_stream_count should be default value when tidb_max_tiflash_threads is 0. 
+ tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = 0") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(variable.DefStreamCountWhenMaxThreadsNotSet), streamCount[0]) + + // Disabled when tiflash_fine_grained_shuffle_stream_count is 0. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 0") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + require.Equal(t, len(streamCount), 0) + + // Test when tiflash_fine_grained_shuffle_stream_count is greater than 0. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 16") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(16), streamCount[0]) +} + func TestIssue33175(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 69f067e10fec7..92c2a3dbcfd80 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -5179,18 +5179,21 @@ func CheckUpdateList(assignFlags []int, updt *Update, newTblID2Table map[int64]t } for i, col := range tbl.WritableCols() { - if flags[i] >= 0 && col.State != model.StatePublic { + if flags[i] < 0 { + continue + } + + if col.State != model.StatePublic { return ErrUnknownColumn.GenWithStackByArgs(col.Name, clauseMsg[fieldList]) } - if flags[i] >= 0 { - update = true - if mysql.HasPriKeyFlag(col.GetFlag()) { - updatePK = true - } - for _, partColName := range partitionColumnNames { - if col.Name.L == partColName.L { - updatePartitionCol = true - } + + update = true + if mysql.HasPriKeyFlag(col.GetFlag()) { + updatePK = true + } + for _, partColName := range partitionColumnNames { + if col.Name.L == partColName.L { + updatePartitionCol = true } } } diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index b6f279f81c4e5..0a73522baa9a3 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -55,6 +55,7 @@ func createPlannerSuite() (s *plannerSuite) { MockRangePartitionTable(), MockHashPartitionTable(), MockListPartitionTable(), + MockStateNoneColumnTable(), } id := int64(0) for _, tblInfo := range tblInfos { @@ -911,6 +912,10 @@ func TestValidate(t *testing.T) { sql: "select a+1 from t having t.a", err: ErrUnknownColumn, }, + { + sql: "update T_StateNoneColumn set c = 1 where a = 1", + err: ErrUnknownColumn, + }, } s := createPlannerSuite() diff --git a/planner/core/memtable_predicate_extractor_test.go b/planner/core/memtable_predicate_extractor_test.go index 97898259e1113..c77cfeb87f2d3 100644 --- a/planner/core/memtable_predicate_extractor_test.go +++ b/planner/core/memtable_predicate_extractor_test.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/set" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) func getLogicalMemTable(t *testing.T, dom *domain.Domain, se session.Session, parser *parser.Parser, sql string) *plannercore.LogicalMemTable { @@ -1736,9 +1737,7 @@ func TestTikvRegionStatusExtractor(t *testing.T) { require.NotNil(t, logicalMemTable.Extractor) rse := 
logicalMemTable.Extractor.(*plannercore.TiKVRegionStatusExtractor) tableids := rse.GetTablesID() - sort.Slice(tableids, func(i, j int) bool { - return tableids[i] < tableids[j] - }) + slices.Sort(tableids) require.Equal(t, ca.tableIDs, tableids) } } diff --git a/planner/core/mock.go b/planner/core/mock.go index 159eb67ee44bf..fb554878862c1 100644 --- a/planner/core/mock.go +++ b/planner/core/mock.go @@ -553,3 +553,55 @@ func MockListPartitionTable() *model.TableInfo { tableInfo.Partition = partition return tableInfo } + +// MockStateNoneColumnTable is only used for plan related tests. +func MockStateNoneColumnTable() *model.TableInfo { + // column: a, b + // PK: a + // indeices: b + indices := []*model.IndexInfo{ + { + Name: model.NewCIStr("b"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("b"), + Length: types.UnspecifiedLength, + Offset: 1, + }, + }, + State: model.StatePublic, + Unique: true, + }, + } + pkColumn := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 0, + Name: model.NewCIStr("a"), + FieldType: newLongType(), + ID: 1, + } + col0 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 1, + Name: model.NewCIStr("b"), + FieldType: newLongType(), + ID: 2, + } + col1 := &model.ColumnInfo{ + State: model.StateNone, + Offset: 2, + Name: model.NewCIStr("c"), + FieldType: newLongType(), + ID: 3, + } + pkColumn.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag | mysql.UnsignedFlag) + col0.SetFlag(mysql.NotNullFlag) + col1.SetFlag(mysql.UnsignedFlag) + table := &model.TableInfo{ + Columns: []*model.ColumnInfo{pkColumn, col0, col1}, + Indices: indices, + Name: model.NewCIStr("T_StateNoneColumn"), + PKIsHandle: true, + } + return table +} diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index fde76b3a41eec..20d4fd598e701 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/tracing" + "github.com/pingcap/tipb/go-tipb" "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/slices" @@ -373,10 +374,139 @@ func postOptimize(sctx sessionctx.Context, plan PhysicalPlan) PhysicalPlan { mergeContinuousSelections(plan) plan = eliminateUnionScanAndLock(sctx, plan) plan = enableParallelApply(sctx, plan) + handleFineGrainedShuffle(sctx, plan) checkPlanCacheable(sctx, plan) return plan } +// Only for MPP(Window<-[Sort]<-ExchangeReceiver<-ExchangeSender). +// TiFlashFineGrainedShuffleStreamCount: +// == 0: fine grained shuffle is disabled. +// > 0: use TiFlashFineGrainedShuffleStreamCount as stream count. +// < 0: use TiFlashMaxThreads as stream count when it's greater than 0. Otherwise use DefStreamCountWhenMaxThreadsNotSet. 
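// A quick worked example of the rules above; the values mirror the unit and integration tests added in this patch:
//   tiflash_fine_grained_shuffle_stream_count = -1, tidb_max_tiflash_threads = 10  => stream count 10
//   tiflash_fine_grained_shuffle_stream_count = -1, tidb_max_tiflash_threads <= 0  => DefStreamCountWhenMaxThreadsNotSet
//   tiflash_fine_grained_shuffle_stream_count = 16                                 => stream count 16
//   tiflash_fine_grained_shuffle_stream_count = 0                                  => fine grained shuffle disabled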
+func handleFineGrainedShuffle(sctx sessionctx.Context, plan PhysicalPlan) { + streamCount := sctx.GetSessionVars().TiFlashFineGrainedShuffleStreamCount + if streamCount == 0 { + return + } + if streamCount < 0 { + if sctx.GetSessionVars().TiFlashMaxThreads > 0 { + streamCount = sctx.GetSessionVars().TiFlashMaxThreads + } else { + streamCount = variable.DefStreamCountWhenMaxThreadsNotSet + } + } + setupFineGrainedShuffle(uint64(streamCount), plan) +} + +func setupFineGrainedShuffle(streamCount uint64, plan PhysicalPlan) { + if tableReader, ok := plan.(*PhysicalTableReader); ok { + if _, isExchangeSender := tableReader.tablePlan.(*PhysicalExchangeSender); isExchangeSender { + helper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: make([]*basePhysicalPlan, 1)} + setupFineGrainedShuffleInternal(tableReader.tablePlan, &helper, streamCount) + } + } else { + for _, child := range plan.Children() { + setupFineGrainedShuffle(streamCount, child) + } + } +} + +type shuffleTarget uint8 + +const ( + unknown shuffleTarget = iota + window + joinBuild +) + +type fineGrainedShuffleHelper struct { + shuffleTarget shuffleTarget + plans []*basePhysicalPlan +} + +func (h *fineGrainedShuffleHelper) clear() { + h.shuffleTarget = unknown + h.plans = h.plans[:0] +} + +func (h *fineGrainedShuffleHelper) updateTarget(t shuffleTarget, p *basePhysicalPlan) { + h.shuffleTarget = t + h.plans = append(h.plans, p) +} + +func setupFineGrainedShuffleInternal(plan PhysicalPlan, helper *fineGrainedShuffleHelper, streamCount uint64) { + switch x := plan.(type) { + case *PhysicalWindow: + // Do not clear the plans because window executor will keep the data partition. + // For non hash partition window function, there will be a passthrough ExchangeSender to collect data, + // which will break data partition. + helper.updateTarget(window, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalSort: + if x.IsPartialSort { + // Partial sort will keep the data partition. + helper.plans = append(helper.plans, &x.basePhysicalPlan) + } else { + // Global sort will break the data partition. + helper.clear() + } + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalSelection: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalProjection: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalExchangeReceiver: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalHashAgg: + // HashAgg is not implemented for now. + helper.clear() + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalHashJoin: + child0 := x.children[0] + child1 := x.children[1] + if x.InnerChildIdx == 0 { + // Child0 is build side. + child0Helper := fineGrainedShuffleHelper{shuffleTarget: joinBuild, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child0, &child0Helper, streamCount) + + // HashJoin is not implemented for now. + helper.clear() + setupFineGrainedShuffleInternal(child1, helper, streamCount) + } else { + // Child1 is build side. + child1Helper := fineGrainedShuffleHelper{shuffleTarget: joinBuild, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child1, &child1Helper, streamCount) + + // HashJoin is not implemented for now. 
+ helper.clear() + setupFineGrainedShuffleInternal(child0, helper, streamCount) + } + case *PhysicalExchangeSender: + if x.ExchangeType == tipb.ExchangeType_Hash { + if helper.shuffleTarget == window { + // Set up stream count for all plans based on shuffle target type. + // Currently, only enable fine grained shuffle if the shuffle target is window. + x.TiFlashFineGrainedShuffleStreamCount = streamCount + for _, p := range helper.plans { + p.TiFlashFineGrainedShuffleStreamCount = streamCount + } + } + } + // exchange sender will break the data partition. + helper.clear() + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + default: + for _, child := range x.Children() { + childHelper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child, &childHelper, streamCount) + } + } +} + // checkPlanCacheable used to check whether a plan can be cached. Plans that // meet the following characteristics cannot be cached: // 1. Use the TiFlash engine. diff --git a/planner/core/optimizer_test.go b/planner/core/optimizer_test.go index cc742c747b406..dd8a41bbab1f3 100644 --- a/planner/core/optimizer_test.go +++ b/planner/core/optimizer_test.go @@ -15,10 +15,13 @@ package core import ( + "reflect" "testing" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/types" + "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/require" ) @@ -102,3 +105,187 @@ func TestMPPJoinKeyTypeConvert(t *testing.T) { testJoinKeyTypeConvert(t, unsignedBigIntType, bigIntType, decimalType, true, true) testJoinKeyTypeConvert(t, bigIntType, unsignedBigIntType, decimalType, true, true) } + +// Test for core.handleFineGrainedShuffle() +func TestHandleFineGrainedShuffle(t *testing.T) { + sortItem := property.SortItem{ + Col: nil, + Desc: true, + } + var plans []*basePhysicalPlan + tableReader := &PhysicalTableReader{} + partWindow := &PhysicalWindow{ + // Meaningless sort item, just for test. 
+ PartitionBy: []property.SortItem{sortItem}, + } + partialSort := &PhysicalSort{ + IsPartialSort: true, + } + sort := &PhysicalSort{} + recv := &PhysicalExchangeReceiver{} + passSender := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_PassThrough, + } + hashSender := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + tableScan := &PhysicalTableScan{} + plans = append(plans, &partWindow.basePhysicalPlan) + plans = append(plans, &partialSort.basePhysicalPlan) + plans = append(plans, &sort.basePhysicalPlan) + plans = append(plans, &recv.basePhysicalPlan) + plans = append(plans, &hashSender.basePhysicalPlan) + clear := func(plans []*basePhysicalPlan) { + for _, p := range plans { + p.children = nil + p.TiFlashFineGrainedShuffleStreamCount = 0 + } + } + var check func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) + check = func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) { + if len(p.Children()) == 0 { + require.Equal(t, expChildCount, curChildCount) + _, isTableScan := p.(*PhysicalTableScan) + require.True(t, isTableScan) + return + } + val := reflect.ValueOf(p) + actStreamCount := reflect.Indirect(val).FieldByName("TiFlashFineGrainedShuffleStreamCount").Interface().(uint64) + require.Equal(t, uint64(expStreamCount), actStreamCount) + for _, child := range p.Children() { + check(child, expStreamCount, expChildCount, curChildCount+1) + } + } + + const expStreamCount int64 = 8 + sctx := MockContext() + sctx.GetSessionVars().TiFlashFineGrainedShuffleStreamCount = expStreamCount + + start := func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) { + handleFineGrainedShuffle(sctx, tableReader) + check(p, expStreamCount, expChildCount, curChildCount) + clear(plans) + } + + // Window <- Sort <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 4, 0) + + // Window <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 3, 0) + + // Window <- Sort(x) <- ExchangeReceiver <- ExchangeSender + // Fine-grained shuffle is disabled because sort is not partial. + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{sort} + sort.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 4, 0) + + // Window <- Sort <- Window <- Sort <- ExchangeReceiver <- ExchangeSender + partWindow1 := &PhysicalWindow{ + // Meaningless sort item, just for test. 
+ PartitionBy: []property.SortItem{sortItem}, + } + partialSort1 := &PhysicalSort{ + IsPartialSort: true, + } + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{partWindow1} + partWindow1.children = []PhysicalPlan{partialSort1} + partialSort1.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 6, 0) + + // Window <- Sort <- Window(x) <- Sort <- ExchangeReceiver <- ExchangeSender(x) + // Fine-grained shuffle is disabled because Window is not hash partition. + nonPartWindow := &PhysicalWindow{} + partialSort1 = &PhysicalSort{ + IsPartialSort: true, + } + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{nonPartWindow} + nonPartWindow.children = []PhysicalPlan{partialSort1} + partialSort1.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{passSender} + passSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 6, 0) + + // HashAgg <- Window <- ExchangeReceiver <- ExchangeSender + hashAgg := &PhysicalHashAgg{} + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{hashAgg} + hashAgg.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + require.Equal(t, uint64(0), hashAgg.TiFlashFineGrainedShuffleStreamCount) + start(partWindow, expStreamCount, 3, 0) + + // Window <- HashAgg(x) <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashAgg = &PhysicalHashAgg{} + partWindow.children = []PhysicalPlan{hashAgg} + hashAgg.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 4, 0) + + // Window <- Join(x) <- ExchangeReceiver <- ExchangeSender + // <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashJoin := &PhysicalHashJoin{} + recv1 := &PhysicalExchangeReceiver{} + tableScan1 := &PhysicalTableScan{} + partWindow.children = []PhysicalPlan{hashJoin} + hashSender1 := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + hashJoin.children = []PhysicalPlan{recv, recv1} + recv.children = []PhysicalPlan{hashSender} + recv1.children = []PhysicalPlan{hashSender1} + hashSender.children = []PhysicalPlan{tableScan} + hashSender1.children = []PhysicalPlan{tableScan1} + start(partWindow, 0, 4, 0) + + // Join <- ExchangeReceiver <- ExchangeSender <- Window <- ExchangeReceiver(2) <- ExchangeSender(2) + // <- ExchangeReceiver(1) <- ExchangeSender(1) + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashJoin = &PhysicalHashJoin{} + recv1 = &PhysicalExchangeReceiver{} + hashJoin.children = []PhysicalPlan{recv, recv1} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{partWindow} + recv2 := &PhysicalExchangeReceiver{} + hashSender2 := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + tableScan2 := &PhysicalTableScan{} + partWindow.children = []PhysicalPlan{recv2} + recv2.children = []PhysicalPlan{hashSender2} + hashSender2.children = 
[]PhysicalPlan{tableScan2} + recv1.children = []PhysicalPlan{hashSender1} + tableScan1 = &PhysicalTableScan{} + hashSender1.children = []PhysicalPlan{tableScan1} + start(partWindow, expStreamCount, 3, 0) +} diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index 97b80f92251c1..fe5c5cba7da00 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -538,10 +538,6 @@ func TestEliminateMaxOneRow(t *testing.T) { tk.MustExec("create table t2(a int(11) DEFAULT NULL, b int(11) DEFAULT NULL)") tk.MustExec("create table t3(a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, c int(11) DEFAULT NULL, UNIQUE KEY idx_abc (a, b, c))") - // When paging is used, there is a 'paging:true' makes the explain output differ. - // IndexLookUp 0.00 root paging:true - tk.MustExec("set @@tidb_enable_paging = off") - for i, ts := range input { testdata.OnRecord(func() { output[i].SQL = ts diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index c5c43df0a0a91..d867f0029a6bf 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -1547,11 +1547,6 @@ func (p *PhysicalCTE) ExtractCorrelatedCols() []*expression.CorrelatedColumn { return corCols } -// AccessObject implements physicalScan interface. -func (p *PhysicalCTE) AccessObject(normalized bool) string { - return fmt.Sprintf("CTE:%s", p.cteAsName.L) -} - // OperatorInfo implements dataAccesser interface. func (p *PhysicalCTE) OperatorInfo(normalized bool) string { return fmt.Sprintf("data:%s", (*CTEDefinition)(p).ExplainID()) @@ -1559,7 +1554,7 @@ func (p *PhysicalCTE) OperatorInfo(normalized bool) string { // ExplainInfo implements Plan interface. func (p *PhysicalCTE) ExplainInfo() string { - return p.AccessObject(false) + ", " + p.OperatorInfo(false) + return p.AccessObject().String() + ", " + p.OperatorInfo(false) } // ExplainID overrides the ExplainID. diff --git a/planner/core/plan.go b/planner/core/plan.go index aad8d06b68a7e..1dedfd05cf7e2 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -427,6 +427,11 @@ type basePhysicalPlan struct { // used by the new cost interface planCostInit bool planCost float64 + + // Only for MPP. If TiFlashFineGrainedShuffleStreamCount > 0: + // 1. For ExchangeSender, means its output will be partitioned by hash key. + // 2. For ExchangeReceiver/Window/Sort, means its input is already partitioned. + TiFlashFineGrainedShuffleStreamCount uint64 } // Cost implements PhysicalPlan interface. 
@@ -441,8 +446,9 @@ func (p *basePhysicalPlan) SetCost(cost float64) { func (p *basePhysicalPlan) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalPlan, error) { base := &basePhysicalPlan{ - basePlan: p.basePlan, - self: newSelf, + basePlan: p.basePlan, + self: newSelf, + TiFlashFineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, } for _, child := range p.children { cloned, err := child.Clone() diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go index ed05bcce2c429..003ca690a206d 100644 --- a/planner/core/plan_test.go +++ b/planner/core/plan_test.go @@ -892,3 +892,44 @@ func TestIssue34863(t *testing.T) { tk.MustQuery("select count(*) from c right join o on c.c_id=o.c_id;").Check(testkit.Rows("5")) tk.MustQuery("select count(o.c_id) from c right join o on c.c_id=o.c_id;").Check(testkit.Rows("5")) } + +func TestCloneFineGrainedShuffleStreamCount(t *testing.T) { + window := &core.PhysicalWindow{} + newPlan, err := window.Clone() + require.NoError(t, err) + newWindow, ok := newPlan.(*core.PhysicalWindow) + require.Equal(t, ok, true) + require.Equal(t, window.TiFlashFineGrainedShuffleStreamCount, newWindow.TiFlashFineGrainedShuffleStreamCount) + + window.TiFlashFineGrainedShuffleStreamCount = 8 + newPlan, err = window.Clone() + require.NoError(t, err) + newWindow, ok = newPlan.(*core.PhysicalWindow) + require.Equal(t, ok, true) + require.Equal(t, window.TiFlashFineGrainedShuffleStreamCount, newWindow.TiFlashFineGrainedShuffleStreamCount) + + sort := &core.PhysicalSort{} + newPlan, err = sort.Clone() + require.NoError(t, err) + newSort, ok := newPlan.(*core.PhysicalSort) + require.Equal(t, ok, true) + require.Equal(t, sort.TiFlashFineGrainedShuffleStreamCount, newSort.TiFlashFineGrainedShuffleStreamCount) + + sort.TiFlashFineGrainedShuffleStreamCount = 8 + newPlan, err = sort.Clone() + require.NoError(t, err) + newSort, ok = newPlan.(*core.PhysicalSort) + require.Equal(t, ok, true) + require.Equal(t, sort.TiFlashFineGrainedShuffleStreamCount, newSort.TiFlashFineGrainedShuffleStreamCount) +} + +// https://github.com/pingcap/tidb/issues/35527. 
+func TestTableDualAsSubQuery(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("CREATE VIEW v0(c0) AS SELECT NULL;") + tk.MustQuery("SELECT v0.c0 FROM v0 WHERE (v0.c0 IS NULL) LIKE(NULL);").Check(testkit.Rows()) + tk.MustQuery("SELECT v0.c0 FROM (SELECT null as c0) v0 WHERE (v0.c0 IS NULL) like (NULL);").Check(testkit.Rows()) +} diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 7f93dd440b3fe..dbea51006c1dd 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -295,9 +295,11 @@ func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.Store } executorID := e.ExplainID().String() return &tipb.Executor{ - Tp: tipb.ExecType_TypeExchangeSender, - ExchangeSender: ecExec, - ExecutorId: &executorID, + Tp: tipb.ExecType_TypeExchangeSender, + ExchangeSender: ecExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: e.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, }, nil } @@ -327,9 +329,11 @@ func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.Sto } executorID := e.ExplainID().String() return &tipb.Executor{ - Tp: tipb.ExecType_TypeExchangeReceiver, - ExchangeReceiver: ecExec, - ExecutorId: &executorID, + Tp: tipb.ExecType_TypeExchangeReceiver, + ExchangeReceiver: ecExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: e.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, }, nil } @@ -540,7 +544,13 @@ func (p *PhysicalWindow) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (* return nil, errors.Trace(err) } executorID := p.ExplainID().String() - return &tipb.Executor{Tp: tipb.ExecType_TypeWindow, Window: windowExec, ExecutorId: &executorID}, nil + return &tipb.Executor{ + Tp: tipb.ExecType_TypeWindow, + Window: windowExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, + }, nil } // ToPB implements PhysicalPlan ToPB interface. @@ -565,7 +575,13 @@ func (p *PhysicalSort) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*ti return nil, errors.Trace(err) } executorID := p.ExplainID().String() - return &tipb.Executor{Tp: tipb.ExecType_TypeSort, Sort: sortExec, ExecutorId: &executorID}, nil + return &tipb.Executor{ + Tp: tipb.ExecType_TypeSort, + Sort: sortExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, + }, nil } // SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos. 
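A rough usage sketch of the ToPB changes above: the variables window and sctx are hypothetical and the setup is elided; only the tipb field names added by this patch are assumed.

// After handleFineGrainedShuffle has tagged the plan, serialization simply copies the values through:
exec, err := window.ToPB(sctx, kv.TiFlash)
if err == nil {
	// Both fields travel to TiFlash alongside the executor definition.
	_ = exec.FineGrainedShuffleStreamCount // mirrors window.TiFlashFineGrainedShuffleStreamCount
	_ = exec.FineGrainedShuffleBatchSize   // mirrors sctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize
}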
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index b150788d36a56..4f574a6010b63 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -2261,7 +2261,8 @@ func (b *PlanBuilder) genV2AnalyzeOptions( func (b *PlanBuilder) getSavedAnalyzeOpts(physicalID int64, tblInfo *model.TableInfo) (map[ast.AnalyzeOptionType]uint64, model.ColumnChoice, []*model.ColumnInfo, error) { analyzeOptions := map[ast.AnalyzeOptionType]uint64{} exec := b.ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, "select sample_num,sample_rate,buckets,topn,column_choice,column_ids from mysql.analyze_options where table_id = %?", physicalID) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "select sample_num,sample_rate,buckets,topn,column_choice,column_ids from mysql.analyze_options where table_id = %?", physicalID) if err != nil { return nil, model.DefaultChoice, nil, err } @@ -2933,17 +2934,17 @@ func (b *PlanBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Plan, }.Init(b.ctx) isView := false isSequence := false + // It depends on ShowPredicateExtractor now + buildPattern := true switch show.Tp { case ast.ShowDatabases, ast.ShowVariables, ast.ShowTables, ast.ShowColumns, ast.ShowTableStatus, ast.ShowCollation: if (show.Tp == ast.ShowTables || show.Tp == ast.ShowTableStatus) && p.DBName == "" { return nil, ErrNoDB } - extractor := newShowBaseExtractor(*show) - if extractor.Extract() { + if extractor := newShowBaseExtractor(*show); extractor.Extract() { p.Extractor = extractor - // Avoid building Selection. - show.Pattern = nil + buildPattern = false } case ast.ShowCreateTable, ast.ShowCreateSequence, ast.ShowPlacementForTable, ast.ShowPlacementForPartition: var err error @@ -3019,7 +3020,8 @@ func (b *PlanBuilder) buildShow(ctx context.Context, show *ast.ShowStmt) (Plan, var err error var np LogicalPlan np = p - if show.Pattern != nil { + // If we have ShowPredicateExtractor, we do not buildSelection with Pattern + if show.Pattern != nil && buildPattern { show.Pattern.Expr = &ast.ColumnNameExpr{ Name: &ast.ColumnName{Name: p.OutputNames()[0].ColName}, } @@ -4645,12 +4647,20 @@ func buildShowSchema(s *ast.ShowStmt, isView bool, isSequence bool) (schema *exp case ast.ShowConfig: names = []string{"Type", "Instance", "Name", "Value"} case ast.ShowDatabases: - names = []string{"Database"} + fieldDB := "Database" + if patternName := extractPatternLikeName(s.Pattern); patternName != "" { + fieldDB = fmt.Sprintf("%s (%s)", fieldDB, patternName) + } + names = []string{fieldDB} case ast.ShowOpenTables: names = []string{"Database", "Table", "In_use", "Name_locked"} ftypes = []byte{mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeLong, mysql.TypeLong} case ast.ShowTables: - names = []string{fmt.Sprintf("Tables_in_%s", s.DBName)} + fieldTable := fmt.Sprintf("Tables_in_%s", s.DBName) + if patternName := extractPatternLikeName(s.Pattern); patternName != "" { + fieldTable = fmt.Sprintf("%s (%s)", fieldTable, patternName) + } + names = []string{fieldTable} if s.Full { names = append(names, "Table_type") } @@ -4870,3 +4880,14 @@ func (b *PlanBuilder) buildCompactTable(node *ast.CompactTableStmt) (Plan, error } return p, nil } + +func extractPatternLikeName(patternLike *ast.PatternLikeExpr) string { + if patternLike == nil { + return "" + } + switch v := patternLike.Pattern.(type) { + case *driver.ValueExpr: + return v.GetString() + } + return "" +} diff --git 
a/planner/core/planbuilder_test.go b/planner/core/planbuilder_test.go index e793d51a68f23..13494433f0fbe 100644 --- a/planner/core/planbuilder_test.go +++ b/planner/core/planbuilder_test.go @@ -21,7 +21,6 @@ import ( "strings" "testing" "unsafe" - _ "unsafe" // required by go:linkname "github.com/pingcap/errors" "github.com/pingcap/tidb/expression" diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 01b6337ebc81f..85e120de5f340 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -128,7 +128,7 @@ func (p *PointGetPlan) Clone() (PhysicalPlan, error) { // ExplainInfo implements Plan interface. func (p *PointGetPlan) ExplainInfo() string { - accessObject, operatorInfo := p.AccessObject(false), p.OperatorInfo(false) + accessObject, operatorInfo := p.AccessObject().String(), p.OperatorInfo(false) if len(operatorInfo) == 0 { return accessObject } @@ -137,52 +137,13 @@ func (p *PointGetPlan) ExplainInfo() string { // ExplainNormalizedInfo implements Plan interface. func (p *PointGetPlan) ExplainNormalizedInfo() string { - accessObject, operatorInfo := p.AccessObject(true), p.OperatorInfo(true) + accessObject, operatorInfo := p.AccessObject().NormalizedString(), p.OperatorInfo(true) if len(operatorInfo) == 0 { return accessObject } return accessObject + ", " + operatorInfo } -// AccessObject implements dataAccesser interface. -func (p *PointGetPlan) AccessObject(normalized bool) string { - var buffer strings.Builder - tblName := p.TblInfo.Name.O - buffer.WriteString("table:") - buffer.WriteString(tblName) - if p.PartitionInfo != nil { - if normalized { - buffer.WriteString(", partition:?") - } else { - buffer.WriteString(", partition:") - buffer.WriteString(p.PartitionInfo.Name.O) - } - } - if p.IndexInfo != nil { - if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle { - buffer.WriteString(", clustered index:") - buffer.WriteString(p.IndexInfo.Name.O) - buffer.WriteString("(") - } else { - buffer.WriteString(", index:") - buffer.WriteString(p.IndexInfo.Name.O) - buffer.WriteString("(") - } - for i, idxCol := range p.IndexInfo.Columns { - if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden { - buffer.WriteString(tblCol.GeneratedExprString) - } else { - buffer.WriteString(idxCol.Name.O) - } - if i+1 < len(p.IndexInfo.Columns) { - buffer.WriteString(", ") - } - } - buffer.WriteString(")") - } - return buffer.String() -} - // OperatorInfo implements dataAccesser interface. func (p *PointGetPlan) OperatorInfo(normalized bool) string { if p.Handle == nil && !p.Lock { @@ -337,53 +298,12 @@ func (p *BatchPointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb. // ExplainInfo implements Plan interface. func (p *BatchPointGetPlan) ExplainInfo() string { - return p.AccessObject(false) + ", " + p.OperatorInfo(false) + return p.AccessObject().String() + ", " + p.OperatorInfo(false) } // ExplainNormalizedInfo implements Plan interface. func (p *BatchPointGetPlan) ExplainNormalizedInfo() string { - return p.AccessObject(true) + ", " + p.OperatorInfo(true) -} - -// AccessObject implements physicalScan interface. 
-func (p *BatchPointGetPlan) AccessObject(normalized bool) string { - var buffer strings.Builder - tblName := p.TblInfo.Name.O - buffer.WriteString("table:") - buffer.WriteString(tblName) - if p.PartitionInfos != nil { - if normalized { - buffer.WriteString(", partition:?") - } else { - for i, partitionInfo := range p.PartitionInfos { - if i == 0 { - buffer.WriteString(", partition:") - } else { - buffer.WriteString(",") - } - buffer.WriteString(partitionInfo.Name.O) - } - } - } - if p.IndexInfo != nil { - if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle { - buffer.WriteString(", clustered index:" + p.IndexInfo.Name.O + "(") - } else { - buffer.WriteString(", index:" + p.IndexInfo.Name.O + "(") - } - for i, idxCol := range p.IndexInfo.Columns { - if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden { - buffer.WriteString(tblCol.GeneratedExprString) - } else { - buffer.WriteString(idxCol.Name.O) - } - if i+1 < len(p.IndexInfo.Columns) { - buffer.WriteString(", ") - } - } - buffer.WriteString(")") - } - return buffer.String() + return p.AccessObject().NormalizedString() + ", " + p.OperatorInfo(true) } // OperatorInfo implements dataAccesser interface. diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 3d55d3f6c14d2..1b5d016606b30 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -21,7 +21,6 @@ import ( "strings" "github.com/pingcap/errors" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -156,9 +155,8 @@ type PreprocessorReturn struct { SnapshotTSEvaluator func(sessionctx.Context) (uint64, error) // LastSnapshotTS is the last evaluated snapshotTS if any // otherwise it defaults to zero - LastSnapshotTS uint64 - InfoSchema infoschema.InfoSchema - ReadReplicaScope string + LastSnapshotTS uint64 + InfoSchema infoschema.InfoSchema } // preprocessWith is used to record info from WITH statements like CTE name. @@ -1653,24 +1651,12 @@ func (p *preprocessor) updateStateFromStaleReadProcessor() error { if err := txnManager.EnterNewTxn(context.TODO(), newTxnRequest); err != nil { return err } - if err := txnManager.OnStmtStart(context.TODO()); err != nil { + if err := txnManager.OnStmtStart(context.TODO(), txnManager.GetCurrentStmt()); err != nil { return err } } } } - - // It is a little hacking for the below codes. `ReadReplicaScope` is used both by stale read's closest read and local txn. - // They are different features and the value for `ReadReplicaScope` will be conflicted in some scenes. - // But because local txn is still an experimental feature, we should make stale read work first. 
- if p.IsStaleness || p.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() { - // When stale read or closet read is set, we read the tidb's locality as the read replica scope - p.ReadReplicaScope = config.GetTxnScopeFromConfig() - } else { - // Otherwise, use the scope from TxnCtx for local txn validation - p.ReadReplicaScope = p.ctx.GetSessionVars().TxnCtx.TxnScope - } - p.initedLastSnapshotTS = true return nil } diff --git a/planner/core/rule_decorrelate.go b/planner/core/rule_decorrelate.go index 330e2e86bc19a..7626f5863f2c2 100644 --- a/planner/core/rule_decorrelate.go +++ b/planner/core/rule_decorrelate.go @@ -189,6 +189,24 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p LogicalPlan, opt *lo } appendRemoveProjTraceStep(apply, proj, opt) return s.optimize(ctx, p, opt) + } else if li, ok := innerPlan.(*LogicalLimit); ok { + // A 'limit' inside an 'exists' subquery can make the plan suboptimal, so we decorrelate the subquery's 'limit' during optimization. + // e.g. select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1); without this rewrite the plan is not optimal. + // If the apply is not a semi join, its output might be expanded even though the subquery has `limit 1`. + if apply.JoinType != SemiJoin && apply.JoinType != LeftOuterSemiJoin && apply.JoinType != AntiSemiJoin && apply.JoinType != AntiLeftOuterSemiJoin { + goto NoOptimize + } + // If the subquery has any filter condition, we do not optimize the limit. + if len(apply.LeftConditions) > 0 || len(apply.RightConditions) > 0 || len(apply.OtherConditions) > 0 || len(apply.EqualConditions) > 0 { + goto NoOptimize + } + // A limit with a non-zero offset affects the result set produced by its child and thus the boolean value of the exists subquery, so it cannot be removed.
+ if li.Offset == 0 { + innerPlan = li.children[0] + apply.SetChildren(outerPlan, innerPlan) + appendRemoveLimitTraceStep(li, opt) + return s.optimize(ctx, p, opt) + } } else if agg, ok := innerPlan.(*LogicalAggregation); ok { if apply.canPullUpAgg() && agg.canPullUp() { innerPlan = agg.children[0] @@ -377,6 +395,16 @@ func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *logicalOptimizeOp) opt.appendStepToCurrent(m.ID(), m.TP(), reason, action) } +func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *logicalOptimizeOp) { + action := func() string { + return fmt.Sprintf("%v_%v removed from plan tree", limit.TP(), limit.ID()) + } + reason := func() string { + return fmt.Sprintf("%v_%v in 'exists' subquery need to remove in order to keep plan optimal", limit.TP(), limit.ID()) + } + opt.appendStepToCurrent(limit.ID(), limit.TP(), reason, action) +} + func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *logicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", proj.TP(), proj.ID()) diff --git a/planner/core/rule_join_reorder.go b/planner/core/rule_join_reorder.go index 0294da8bac852..02d04cd77479a 100644 --- a/planner/core/rule_join_reorder.go +++ b/planner/core/rule_join_reorder.go @@ -194,19 +194,15 @@ func (s *joinReOrderSolver) optimizeRecursive(ctx sessionctx.Context, p LogicalP } if leadingHintInfo != nil && leadingHintInfo.leadingJoinOrder != nil { - if hasOuterJoin { - ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable when we have outer join")) - } else { - if useGreedy { - ok, leftJoinGroup := baseGroupSolver.generateLeadingJoinGroup(curJoinGroup, leadingHintInfo) - if !ok { - ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable, check if the leading hint table is valid")) - } else { - curJoinGroup = leftJoinGroup - } + if useGreedy { + ok, leftJoinGroup := baseGroupSolver.generateLeadingJoinGroup(curJoinGroup, leadingHintInfo, hasOuterJoin) + if !ok { + ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable, check if the leading hint table is valid")) } else { - ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable for the DP join reorder algorithm")) + curJoinGroup = leftJoinGroup } + } else { + ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack("leading hint is inapplicable for the DP join reorder algorithm")) } } @@ -298,7 +294,7 @@ type baseSingleGroupJoinOrderSolver struct { leadingJoinGroup LogicalPlan } -func (s *baseSingleGroupJoinOrderSolver) generateLeadingJoinGroup(curJoinGroup []LogicalPlan, hintInfo *tableHintInfo) (bool, []LogicalPlan) { +func (s *baseSingleGroupJoinOrderSolver) generateLeadingJoinGroup(curJoinGroup []LogicalPlan, hintInfo *tableHintInfo, hasOuterJoin bool) (bool, []LogicalPlan) { var leadingJoinGroup []LogicalPlan leftJoinGroup := make([]LogicalPlan, len(curJoinGroup)) copy(leftJoinGroup, curJoinGroup) @@ -324,6 +320,10 @@ func (s *baseSingleGroupJoinOrderSolver) generateLeadingJoinGroup(curJoinGroup [ var usedEdges []*expression.ScalarFunction var joinType JoinType leadingJoin, leadingJoinGroup[0], usedEdges, joinType = s.checkConnection(leadingJoin, leadingJoinGroup[0]) + if hasOuterJoin && usedEdges == nil { + // If the joinGroups contain the outer join, we disable the cartesian product. 
+ return false, nil + } leadingJoin, s.otherConds = s.makeJoin(leadingJoin, leadingJoinGroup[0], usedEdges, joinType) leadingJoinGroup = leadingJoinGroup[1:] } diff --git a/planner/core/rule_join_reorder_test.go b/planner/core/rule_join_reorder_test.go index 59a53c41adb04..4c15c6052eafd 100644 --- a/planner/core/rule_join_reorder_test.go +++ b/planner/core/rule_join_reorder_test.go @@ -79,22 +79,6 @@ func TestLeadingJoinHint(t *testing.T) { tk.MustExec("create table t8(a int, b int, key(a));") runJoinReorderTestData(t, tk, "TestLeadingJoinHint") - // test cases for outer join - tk.MustExec("select /*+ leading(t1, t3) */ * from t1 left join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t2) */ * from t1 left join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t2, t3) */ * from t1 left join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t1, t2, t3) */ * from t1 left join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t1, t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t1, t2) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - tk.MustExec("select /*+ leading(t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - // test cases for multiple leading hints tk.MustExec("select /*+ leading(t1) leading(t2) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b") tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid")) @@ -166,13 +150,6 @@ func TestJoinOrderHint(t *testing.T) { tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t1) in optimizer hint /*+ LEADING(t2, t1) */. Maybe you can use the table alias name", "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid")) - // conflict between table names - tk.MustExec("select /*+ leading(t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - - tk.MustExec("select /*+ leading(t1, t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable when we have outer join")) - // table name in leading hint cross query block // Todo: Can not handle this case yet. Because when we extract the join group, it will get the join group {t1, t2, t3}. // So the table 't4' can not be used. 
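-- Illustrative sketch, not part of the patch: the queries below mirror the
-- TestDecorrelateLimitInSubquery cases added later in this diff and show the shape
-- targeted by the new LogicalLimit branch in rule_decorrelate.go. The test(id, value)
-- schema is assumed here only so the statements are self-contained.
create table test(id int, value int);
-- EXISTS only asks whether at least one matching row exists, so when the subquery's
-- only extra operator is a limit with offset 0 and the apply is a semi join with no
-- other join conditions, the limit can be dropped and the predicate planned as a
-- plain semi join instead of an Apply.
explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1);
-- A non-zero offset is left untouched: skipping rows can change whether any row
-- survives, and therefore the value of the EXISTS predicate.
explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1,2);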
@@ -329,3 +306,22 @@ func TestJoinOrderHint4Subquery(t *testing.T) { runJoinReorderTestData(t, tk, "TestJoinOrderHint4Subquery") } + +func TestLeadingJoinHint4OuterJoin(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;") + tk.MustExec("create table t(a int, b int, key(a));") + tk.MustExec("create table t1(a int, b int, key(a));") + tk.MustExec("create table t2(a int, b int, key(a));") + tk.MustExec("create table t3(a int, b int, key(a));") + tk.MustExec("create table t4(a int, b int, key(a));") + tk.MustExec("create table t5(a int, b int, key(a));") + tk.MustExec("create table t6(a int, b int, key(a));") + tk.MustExec("create table t7(a int, b int, key(a));") + tk.MustExec("create table t8(a int, b int, key(a));") + runJoinReorderTestData(t, tk, "TestLeadingJoinHint4OuterJoin") +} diff --git a/planner/core/rule_predicate_push_down.go b/planner/core/rule_predicate_push_down.go index d0b9553d55085..59a29d788d65a 100644 --- a/planner/core/rule_predicate_push_down.go +++ b/planner/core/rule_predicate_push_down.go @@ -417,6 +417,11 @@ func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression return predicates, child } } + if len(p.children) == 1 { + if _, isDual := p.children[0].(*LogicalTableDual); isDual { + return predicates, p + } + } for _, cond := range predicates { newFilter := expression.ColumnSubstitute(cond, p.Schema(), p.Exprs) if !expression.HasGetSetVarFunc(newFilter) { diff --git a/planner/core/task.go b/planner/core/task.go index 2f6d853f6b382..7d88aac896812 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -667,7 +667,7 @@ func calcPagingCost(ctx sessionctx.Context, indexPlan PhysicalPlan, expectCnt ui // we want the diff between idxCst and pagingCst here, // however, the idxCst does not contain seekFactor, so a seekFactor needs to be removed - return pagingCst - sessVars.GetSeekFactor(nil) + return math.Max(pagingCst-sessVars.GetSeekFactor(nil), 0) } func (t *rootTask) convertToRootTask(_ sessionctx.Context) *rootTask { diff --git a/planner/core/testdata/integration_suite_in.json b/planner/core/testdata/integration_suite_in.json index a2a6c7e655efc..fcc575763c4af 100644 --- a/planner/core/testdata/integration_suite_in.json +++ b/planner/core/testdata/integration_suite_in.json @@ -288,6 +288,16 @@ "explain format = 'brief' select * from t where exists (select 1 from t t1 join t t2 on t1.a = t2.a and t1.a = t.a)" ] }, + { + "name": "TestDecorrelateLimitInSubquery", + "cases": [ + // Query with EXISTS and subquery with LIMIT should have the same plan, i.e, the Limit has been decorrelated. + "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1)", + "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id)", + "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1,2)", + "explain format = 'brief' select * from t where 9 in (select c from t s where s.c < t.c limit 3)" + ] + }, { "name": "TestMultiColMaxOneRow", "cases": [ @@ -392,7 +402,7 @@ // `left` has not been pushed to TiKV, but it has been pushed to TiFlash. // We expect a Selection will be added above IndexMerge. 
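-- Illustrative sketch, not part of the patch: with the rule_join_reorder.go change
-- above, a leading hint is no longer rejected wholesale when the join group contains
-- an outer join; generateLeadingJoinGroup now receives hasOuterJoin and only refuses
-- orderings that would force a cartesian product. The tables mirror those created in
-- TestLeadingJoinHint4OuterJoin and are assumed here only for self-containment.
create table t1(a int, b int, key(a));
create table t2(a int, b int, key(a));
create table t3(a int, b int, key(a));
-- Previously this emitted "leading hint is inapplicable when we have outer join";
-- now the hint is honored because t2 is connected to the rest of the join group.
select /*+ leading(t2) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b;
-- A hinted prefix with no connecting join condition is still rejected, now with the
-- warning "leading hint is inapplicable, check if the leading hint table is valid".
select /*+ leading(t3, t1) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b;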
"select /*+ use_index_merge(t1) */ 1 from t1 where c1 = 'ab' or c2 = '10' and char_length(left(c1, 10)) = 10;", - + // c3 is part of idx_1, so it will be put in partial_path's IndexFilters instead of TableFilters. // But it still cannot be pushed to TiKV. This case cover code in DataSource.buildIndexMergeOrPath. "select /*+ use_index_merge(tt1) */ 1 from tt1 where c1 = 'de' or c2 = '10' and from_base64(to_base64(c3)) = '10';", @@ -403,7 +413,7 @@ // This case covert expression index. "select /*+ use_index_merge(tt3) */ 1 from tt3 where c1 < -10 or c2 < 10 and reverse(c3) = '2';", - + // If no hint, we cannot use index merge if filter cannot be pushed to any storage. "select 1 from t1 where c1 = 'de' or c2 = '10' and from_base64(to_base64(c1)) = 'ab';" ] @@ -928,5 +938,34 @@ "explain format = 'brief' select count(*) from rp_t where a = 1 or a = 20", "explain format = 'brief' select count(*) from hp_t where a = 1 or a = 20" ] + }, + { + "name": "TestTiFlashFineGrainedShuffle", + "cases": [ + // 1. Can use fine grained shuffle. + "explain format = 'brief' select row_number() over w1 from t1 window w1 as (partition by c1 order by c1);", + // Test two window function. + "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2);", + // Limit + Order. + "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2) order by 1, 2 limit 10;", + // // No partition by key in w2, so disabled. But w1 is still enabled. BUG: https://github.com/pingcap/tidb/pull/35256#discussion_r913324160 + // "explain format = 'brief' select row_number() over w1, row_number() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (order by c1);", + // GroupBy key and window function partition key are not same. + "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c2 order by c2);", + "explain format = 'brief' select row_number() over w1, count(c1) from t1 group by c2 having c2 > 10 window w1 as (partition by c1 order by c2);", + // Join, same as GroupBy. + "explain format = 'brief' select row_number() over w1 from t1 a join t1 b on a.c1 = b.c2 window w1 as (partition by a.c1);", + // Selection. + "explain format = 'brief' select row_number() over w1 from t1 where c1 < 100 window w1 as (partition by c1 order by c1);", + + // 2. Cannot use fine grained shuffle. + // No window function, so disabled. + "explain format = 'brief' select * from t1;", + // No partition key in window function, so disabled. + "explain format = 'brief' select row_number() over w1 from t1 window w1 as (order by c1);", + // GroupBy key is same with window function partition key, so they are in one fragment. + // But fine grained shuffle doesn't support group by for now. 
+ "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c1 order by c2);" + ] } ] diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json index f3f200872b93e..aeff80fd103ea 100644 --- a/planner/core/testdata/integration_suite_out.json +++ b/planner/core/testdata/integration_suite_out.json @@ -1538,6 +1538,65 @@ } ] }, + { + "Name": "TestDecorrelateLimitInSubquery", + "Cases": [ + { + "SQL": "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1)", + "Plan": [ + "HashAgg 1.00 root funcs:count(1)->Column#7", + "└─HashJoin 7992.00 root semi join, equal:[eq(test.test.id, test.test.id)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.test.id))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.test.id))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id)", + "Plan": [ + "HashAgg 1.00 root funcs:count(1)->Column#7", + "└─HashJoin 7992.00 root semi join, equal:[eq(test.test.id, test.test.id)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.test.id))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.test.id))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1,2)", + "Plan": [ + "HashAgg 1.00 root funcs:count(1)->Column#7", + "└─Apply 10000.00 root CARTESIAN semi join", + " ├─TableReader(Build) 10000.00 root data:TableFullScan", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─Limit(Probe) 2.00 root offset:1, count:2", + " └─TableReader 3.00 root data:Limit", + " └─Limit 3.00 cop[tikv] offset:0, count:3", + " └─Selection 3.00 cop[tikv] eq(test.test.id, test.test.id)", + " └─TableFullScan 3000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select * from t where 9 in (select c from t s where s.c < t.c limit 3)", + "Plan": [ + "Apply 10000.00 root CARTESIAN semi join", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", + "└─Selection(Probe) 2.40 root eq(9, test.t.c)", + " └─Limit 3.00 root offset:0, count:3", + " └─TableReader 3.00 root data:Limit", + " └─Limit 3.00 cop[tikv] offset:0, count:3", + " └─Selection 3.00 cop[tikv] lt(test.t.c, test.t.c)", + " └─TableFullScan 3.75 cop[tikv] table:s keep order:false, stats:pseudo" + ] + } + ] + }, { "Name": "TestMultiColMaxOneRow", "Cases": [ @@ -6972,5 +7031,165 @@ ] } ] + }, + { + "Name": "TestTiFlashFineGrainedShuffle", + "Cases": [ + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 window w1 as (partition by c1 order by c1);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 
mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#5, stream_count: 8", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#5 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#7, Column#6, stream_count: 8", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Window 10000.00 mpp[tiflash] rank()->Column#6 over(partition by test.t1.c2), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2) order by 1, 2 limit 10;", + "Plan": [ + "Projection 10.00 root Column#7, Column#6", + "└─TopN 10.00 root Column#7, Column#6, offset:0, count:10", + " └─TableReader 10.00 root data:ExchangeSender", + " └─ExchangeSender 10.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TopN 10.00 mpp[tiflash] Column#7, Column#6, offset:0, count:10", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Window 10000.00 mpp[tiflash] rank()->Column#6 over(partition by test.t1.c2), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c2 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 
2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4, stream_count: 8", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c2 order by test.t1.c2 rows between current row and current row), stream_count: 8", + " └─Sort 2666.67 mpp[tiflash] test.t1.c2, test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 2666.67 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c1, funcs:count(test.t1.c2)->Column#4, funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c1, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c1) from t1 group by c2 having c2 > 10 window w1 as (partition by c1 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4, stream_count: 8", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c1 order by test.t1.c2 rows between current row and current row), stream_count: 8", + " └─Sort 2666.67 mpp[tiflash] test.t1.c1, test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 2666.67 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c1, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c2, funcs:count(test.t1.c1)->Column#4, funcs:firstrow(test.t1.c1)->test.t1.c1, funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c2, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 a join t1 b on a.c1 = b.c2 window w1 as (partition by a.c1);", + "Plan": [ + "TableReader 12487.50 root data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12487.50 mpp[tiflash] Column#8, stream_count: 8", + " └─Window 12487.50 mpp[tiflash] row_number()->Column#8 over(partition by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 12487.50 mpp[tiflash] test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 12487.50 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t1.c1, test.t1.c2)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t1.c1))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:a keep order:false, stats:pseudo", + 
" └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t1.c2))", + " └─TableFullScan 10000.00 mpp[tiflash] table:b keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 where c1 < 100 window w1 as (partition by c1 order by c1);", + "Plan": [ + "TableReader 3323.33 root data:ExchangeSender", + "└─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 3323.33 mpp[tiflash] Column#5, stream_count: 8", + " └─Window 3323.33 mpp[tiflash] row_number()->Column#5 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 3323.33 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 3323.33 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Selection 3323.33 mpp[tiflash] lt(test.t1.c1, 100)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select * from t1;", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 window w1 as (order by c1);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#5", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#5 over(order by test.t1.c1 rows between current row and current row)", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c1 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c1 order by test.t1.c2 rows between current row and current row)", + " └─Sort 2666.67 mpp[tiflash] test.t1.c1, test.t1.c2", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c1, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c1, funcs:count(test.t1.c2)->Column#4, funcs:firstrow(test.t1.c1)->test.t1.c1, funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c1, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + } + ] } ] diff --git a/planner/core/testdata/join_reorder_suite_in.json b/planner/core/testdata/join_reorder_suite_in.json index 8b0e77e742422..3145779cc69bf 100644 --- a/planner/core/testdata/join_reorder_suite_in.json +++ b/planner/core/testdata/join_reorder_suite_in.json @@ -298,5 +298,66 @@ "select /*+ leading(t1, t2@sel_2) */ t1.a, (select min(t2.a) from t2) from t1 join t3 on t1.a = t3.a;", 
"select /*+ leading(t3, t2@sel_2) */ t1.a, (select min(t2.a) from t2) from t1 join t3 on t1.a = t3.a;" ] + }, + { + "name": "TestLeadingJoinHint4OuterJoin", + "cases": [ + "select /*+ leading(t3, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t3, t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t1, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t3) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t3, t2, t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t1, t2, t3) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t3, t1, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "select /*+ leading(t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t2, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t2, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t3, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t2, t1, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t1, t3, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t2, t3, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "select /*+ leading(t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t1, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t3, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t1, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t1, t2, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t3, t1, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "select /*+ leading(t1) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t4, t1) */ * from (t1 left join t2 on t1.a=t2.a) left join 
(t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t4, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t3, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t3, t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1, t2, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1, t4, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t4, t2, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1, t2, t3, t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "select /*+ leading(t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t2) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t3) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t4) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t2, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t2, t3) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t4, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t3, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select * from ((select /*+ leading(t5) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select * from ((select /*+ leading(t6) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select * from ((select /*+ leading(t5, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select * from ((select /*+ leading(t6, t8, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t3) */ * from ((select /*+ leading(t5) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on 
t1.a=t4.a;", + "select /*+ leading(t3, t1) */ * from ((select /*+ leading(t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t3, t1, t2) */ * from ((select /*+ leading(t6, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "select /*+ leading(t3, t4) */ * from ((select /*+ leading(t5, t7, t8) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;" + ] } ] diff --git a/planner/core/testdata/join_reorder_suite_out.json b/planner/core/testdata/join_reorder_suite_out.json index 4fa0ac13bc667..f82b504714a2d 100644 --- a/planner/core/testdata/join_reorder_suite_out.json +++ b/planner/core/testdata/join_reorder_suite_out.json @@ -2865,78 +2865,74 @@ { "SQL": "select /*+ leading(t2) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b", "Plan": [ - "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", - "├─TableReader(Build) 9990.00 root data:Selection", - "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", - "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", - "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t2.a)]", + "Projection 15609.38 root test.t1.a, test.t1.b, test.t2.a, test.t2.b, test.t3.a, test.t3.b", + "└─HashJoin 15609.38 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t2.a, test.t1.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" ], - "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" - ] + "Warning": null }, { "SQL": "select /*+ leading(t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b", "Plan": [ - "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", - "├─TableReader(Build) 9990.00 root data:Selection", - "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", - "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", - "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t2.a)]", + "Projection 15609.38 root test.t1.a, test.t1.b, test.t2.a, test.t2.b, test.t3.a, test.t3.b", + "└─HashJoin 15609.38 root inner join, equal:[eq(test.t2.a, test.t1.a)]", " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep 
order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" ], - "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" - ] + "Warning": null }, { "SQL": "select /*+ leading(t2, t3) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b", "Plan": [ - "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", - "├─TableReader(Build) 9990.00 root data:Selection", - "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", - "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", - "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t2.a)]", + "Projection 15609.38 root test.t1.a, test.t1.b, test.t2.a, test.t2.b, test.t3.a, test.t3.b", + "└─HashJoin 15609.38 root inner join, equal:[eq(test.t2.a, test.t1.a)]", " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" ], - "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" - ] + "Warning": null }, { "SQL": "select /*+ leading(t3, t2) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b", "Plan": [ - "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", - "├─TableReader(Build) 9990.00 root data:Selection", - "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", - "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", - "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t2.a)]", + "Projection 15609.38 root test.t1.a, test.t1.b, test.t2.a, test.t2.b, test.t3.a, test.t3.b", + "└─HashJoin 15609.38 root inner join, equal:[eq(test.t2.a, test.t1.a)]", " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, 
stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" ], - "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" - ] + "Warning": null }, { "SQL": "select /*+ leading(t3, t1) */ * from t1 join t2 on t1.a=t2.a left join t3 on t2.b=t3.b", @@ -2954,7 +2950,7 @@ " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" ] }, { @@ -2989,9 +2985,7 @@ " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], - "Warning": [ - "Warning 1815 leading hint is inapplicable when we have outer join" - ] + "Warning": null }, { "SQL": "select /*+ leading(t2, t3) */ * from t1 join t2 on t1.a=t2.a right join t3 on t2.b=t3.b", @@ -3010,7 +3004,7 @@ ], "Warning": [ "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", - "Warning 1815 leading hint is inapplicable when we have outer join" + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" ] }, { @@ -3030,7 +3024,7 @@ ], "Warning": [ "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", - "Warning 1815 leading hint is inapplicable when we have outer join" + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" ] }, { @@ -3050,7 +3044,7 @@ ], "Warning": [ "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", - "Warning 1815 leading hint is inapplicable when we have outer join" + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" ] }, { @@ -4880,5 +4874,1277 @@ ] } ] + }, + { + "Name": "TestLeadingJoinHint4OuterJoin", + "Cases": [ + { + "SQL": "select /*+ leading(t3, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check 
if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + "├─TableReader(Build) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + "├─TableReader(Build) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + 
"HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3, t2, t1) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1, t2, t3) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + "├─TableReader(Build) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3, t1, t2) */ * from t2 left join t1 on t2.a=t1.a left join t3 on t1.b=t3.b;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.b, test.t3.b)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.b))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left 
cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint" + ] + }, + { + "SQL": "select /*+ leading(t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t2, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 
root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2, t1, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1, t3, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { 
+ "SQL": "select /*+ leading(t2, t3, t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=1;", + "Plan": [ + "HashJoin 124875000.00 root CARTESIAN left outer join, left cond:[eq(test.t2.a, 1)]", + "├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check the join type or the join algorithm hint", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─Projection(Probe) 12487.50 root test.t1.a, test.t1.b, test.t3.a, test.t3.b", + " └─HashJoin 12487.50 root inner join, equal:[eq(test.t3.a, test.t1.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, 
stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t2, t3) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, 
stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1, t2) */ * from t2 left join (t1 left join t3 on t1.a=t3.a) on t2.a=t3.a;", + "Plan": [ + "HashJoin 15609.38 root left outer join, equal:[eq(test.t2.a, test.t3.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root inner join, equal:[eq(test.t1.a, test.t3.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] 
table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─Projection(Build) 12487.50 root test.t3.a, test.t3.b, test.t4.a, test.t4.b", + "│ └─HashJoin 12487.50 root inner join, equal:[eq(test.t4.a, test.t3.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 
cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t4, t1) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t4, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ 
└─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t2) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t1, t2, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ 
├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1, t4, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t4, t2, t3) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 
leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1, t2, t3, t4) */ * from (t1 left join t2 on t1.a=t2.a) left join (t3 left join t4 on t3.a=t4.a) on t2.a=t4.a;", + "Plan": [ + "HashJoin 19511.72 root left outer join, equal:[eq(test.t2.a, test.t4.a)]", + "├─HashJoin(Build) 12487.50 root inner join, equal:[eq(test.t3.a, test.t4.a)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t3.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + 
"Warning": null + }, + { + "SQL": "select /*+ leading(t2) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t3) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 
9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t4) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ 
└─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ leading(t2, t3) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t4, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, 
t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1) */ * from ((select t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " 
│ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select * from ((select /*+ leading(t5) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9990.00 root data:Selection", + "│ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t5.a, test.t6.a)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─IndexReader(Probe) 9990.00 root index:IndexFullScan", + "│ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select * from ((select /*+ leading(t6) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 
9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select * from ((select /*+ leading(t5, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select * 
from ((select /*+ leading(t6, t8, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid" + ] + }, + { + "SQL": "select /*+ leading(t3) */ * from ((select /*+ leading(t5) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ 
└─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1) */ * from ((select /*+ leading(t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid" + ] + }, + { + "SQL": "select /*+ leading(t3, t1, t2) */ * from ((select /*+ leading(t6, t7) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 
9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", + " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 leading hint is inapplicable, check if the leading hint table is valid", + "Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid" + ] + }, + { + "SQL": "select /*+ leading(t3, t4) */ * from ((select /*+ leading(t5, t7, t8) */ t8.a, t8.b from t8, t7, t6, t5 where t5.a = t6.a and t6.b=t7.b) t3 left join t4 on t3.a=t4.a) left join (t1 left join t2 on t1.a=t2.a) on t1.a=t4.a;", + "Plan": [ + "HashJoin 304261169.13 root CARTESIAN inner join", + "├─HashJoin(Build) 15593.77 root inner join, equal:[eq(test.t6.a, test.t5.a)]", + "│ ├─IndexReader(Build) 9990.00 root index:IndexFullScan", + "│ │ └─IndexFullScan 9990.00 cop[tikv] table:t5, index:a(a) keep order:false, stats:pseudo", + "│ └─HashJoin(Probe) 12475.01 root inner join, equal:[eq(test.t6.b, test.t7.b)]", + "│ ├─TableReader(Build) 9980.01 root data:Selection", + "│ │ └─Selection 9980.01 cop[tikv] not(isnull(test.t6.a)), not(isnull(test.t6.b))", + "│ │ └─TableFullScan 10000.00 cop[tikv] table:t6 keep order:false, stats:pseudo", + "│ └─TableReader(Probe) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t7.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t7 keep order:false, stats:pseudo", + "└─HashJoin(Probe) 19511.72 root left outer join, equal:[eq(test.t4.a, test.t1.a)]", + " ├─HashJoin(Build) 12487.50 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " │ ├─TableReader(Build) 9990.00 root data:Selection", + " │ │ └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " │ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─TableReader(Probe) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t1.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12487.50 root left outer join, equal:[eq(test.t8.a, test.t4.a)]", 
+ " ├─TableReader(Build) 9990.00 root data:Selection", + " │ └─Selection 9990.00 cop[tikv] not(isnull(test.t4.a))", + " │ └─TableFullScan 10000.00 cop[tikv] table:t4 keep order:false, stats:pseudo", + " └─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t8 keep order:false, stats:pseudo" + ], + "Warning": [ + "Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid" + ] + } + ] } ] diff --git a/planner/core/testdata/window_push_down_suite_out.json b/planner/core/testdata/window_push_down_suite_out.json index 2b7b7b893cda4..085d1326f3daa 100644 --- a/planner/core/testdata/window_push_down_suite_out.json +++ b/planner/core/testdata/window_push_down_suite_out.json @@ -37,10 +37,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -50,11 +50,11 @@ "Plan": [ "TableReader_30 10000.00 root data:ExchangeSender_29", "└─ExchangeSender_29 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection_7 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#7", - " └─Window_28 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#6 rows between current row and current row)", - " └─Sort_14 10000.00 mpp[tiflash] Column#6", - " └─ExchangeReceiver_13 10000.00 mpp[tiflash] ", - " └─ExchangeSender_12 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary]", + " └─Projection_7 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#7, stream_count: 8", + " └─Window_28 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#6 rows between current row and current row), stream_count: 8", + " └─Sort_14 10000.00 mpp[tiflash] Column#6, stream_count: 8", + " └─ExchangeReceiver_13 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_12 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary], stream_count: 8", " └─Projection_10 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, plus(test.employee.deptid, 1)->Column#6", " └─TableFullScan_11 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], @@ -65,10 +65,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by 
test.employee.deptid order by test.employee.salary desc rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, test.employee.salary:desc", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid order by test.employee.salary desc rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, test.employee.salary:desc, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -78,10 +78,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] rank()->Column#7, dense_rank()->Column#8 over(partition by test.employee.deptid)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] rank()->Column#7, dense_rank()->Column#8 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -91,12 +91,12 @@ "Plan": [ "TableReader_36 10000.00 root data:ExchangeSender_35", "└─ExchangeSender_35 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection_9 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#8, Column#7", - " └─Window_34 10000.00 mpp[tiflash] row_number()->Column#8 over(partition by test.employee.deptid rows between current row and current row)", - " └─Window_12 10000.00 mpp[tiflash] rank()->Column#7 over(partition by test.employee.deptid)", - " └─Sort_17 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_16 10000.00 mpp[tiflash] ", - " └─ExchangeSender_15 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Projection_9 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#8, Column#7, stream_count: 8", + " └─Window_34 10000.00 mpp[tiflash] row_number()->Column#8 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Window_12 10000.00 mpp[tiflash] rank()->Column#7 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_17 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_16 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_15 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_14 
10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -119,10 +119,10 @@ "Plan": [ "TableReader_36 10000.00 root data:ExchangeSender_35", "└─ExchangeSender_35 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_34 10000.00 mpp[tiflash] rank()->Column#8 over(partition by test.employee.deptid)", - " └─Sort_20 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_19 10000.00 mpp[tiflash] ", - " └─ExchangeSender_18 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_34 10000.00 mpp[tiflash] rank()->Column#8 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_20 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_19 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_18 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─Window_14 10000.00 mpp[tiflash] row_number()->Column#6 over(rows between current row and current row)", " └─ExchangeReceiver_17 10000.00 mpp[tiflash] ", " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: PassThrough", @@ -285,10 +285,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.empid order by test.employee.salary rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.empid, test.employee.salary", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.empid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.empid order by test.employee.salary rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.empid, test.employee.salary, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.empid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": [ @@ -353,10 +353,10 @@ "Plan": [ "TableReader_45 1.00 root data:ExchangeSender_44", "└─ExchangeSender_44 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_43 1.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#5 rows between current row and current row)", - " └─Sort_20 1.00 mpp[tiflash] Column#5", - " └─ExchangeReceiver_19 1.00 mpp[tiflash] ", - " └─ExchangeSender_18 1.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#5, collate: binary]", + " └─Window_43 1.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#5 rows between current row and current row), stream_count: 8", + " └─Sort_20 1.00 mpp[tiflash] Column#5, stream_count: 8", + " └─ExchangeReceiver_19 1.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_18 1.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#5, collate: binary], stream_count: 8", " └─Projection_14 1.00 mpp[tiflash] Column#5", " └─HashAgg_15 1.00 mpp[tiflash] funcs:count(distinct test.employee.empid)->Column#5", " └─ExchangeReceiver_17 1.00 mpp[tiflash] ", @@ -405,10 +405,10 @@ " └─ExchangeReceiver_43 1.00 mpp[tiflash] ", 
" └─ExchangeSender_42 1.00 mpp[tiflash] ExchangeType: PassThrough", " └─HashAgg_39 1.00 mpp[tiflash] group by:test.employee.empid, ", - " └─Window_27 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_18 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_17 10000.00 mpp[tiflash] ", - " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_27 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_18 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_17 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_15 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -436,10 +436,10 @@ " └─HashAgg_46 10000.00 mpp[tiflash] group by:Column#6, funcs:count(test.employee.empid)->Column#7", " └─ExchangeReceiver_32 10000.00 mpp[tiflash] ", " └─ExchangeSender_31 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary]", - " └─Window_30 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_21 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_20 10000.00 mpp[tiflash] ", - " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_30 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_21 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_20 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_18 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null diff --git a/planner/funcdep/BUILD.bazel b/planner/funcdep/BUILD.bazel index 0687c1177e8a9..2a0c09b58a244 100644 --- a/planner/funcdep/BUILD.bazel +++ b/planner/funcdep/BUILD.bazel @@ -17,6 +17,7 @@ go_library( go_test( name = "funcdep_test", + timeout = "short", srcs = [ "extract_fd_test.go", "fast_int_set_bench_test.go", diff --git a/planner/funcdep/extract_fd_test.go b/planner/funcdep/extract_fd_test.go index b5bb646cba073..ba4a4d12b5436 100644 --- a/planner/funcdep/extract_fd_test.go +++ b/planner/funcdep/extract_fd_test.go @@ -214,7 +214,7 @@ func TestFDSet_ExtractFD(t *testing.T) { for i, tt := range tests { comment := fmt.Sprintf("case:%v sql:%s", i, tt.sql) require.NoError(t, tk.Session().PrepareTxnCtx(context.TODO())) - require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO())) + require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO(), nil)) stmt, err := par.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) tk.Session().GetSessionVars().PlanID = 0 @@ -255,21 +255,21 @@ func TestFDSet_ExtractFDForApply(t *testing.T) { }{ { sql: "select * from X where exists (select * from Y where m=a limit 1)", - // For this Apply, it's essentially a 
semi join, for every `a` in X, do the inner loop once. + // For this query, it's essentially a semi join, for every `a` in X, do the inner loop once. // +- datasource(x) - // +- limit + // +- where // +- datasource(Y) - best: "Apply{DataScan(X)->DataScan(Y)->Limit}->Projection", + best: "Join{DataScan(X)->DataScan(Y)}(test.x.a,test.y.m)->Projection", // Since semi join will keep the **all** rows of the outer table, it's FD can be derived. fd: "{(1)-->(2-5), (2,3)~~>(1,4,5)} >>> {(1)-->(2-5), (2,3)~~>(1,4,5)}", }, { sql: "select a, b from X where exists (select * from Y where m=a limit 1)", - // For this Apply, it's essentially a semi join, for every `a` in X, do the inner loop once. + // For this query, it's essentially a semi join, for every `a` in X, do the inner loop once. // +- datasource(x) - // +- limit + // +- where // +- datasource(Y) - best: "Apply{DataScan(X)->DataScan(Y)->Limit}->Projection", // semi join + best: "Join{DataScan(X)->DataScan(Y)}(test.x.a,test.y.m)->Projection", // semi join // Since semi join will keep the **part** rows of the outer table, it's FD can be derived. fd: "{(1)-->(2-5), (2,3)~~>(1,4,5)} >>> {(1)-->(2)}", }, @@ -312,7 +312,7 @@ func TestFDSet_ExtractFDForApply(t *testing.T) { is := testGetIS(t, tk.Session()) for i, tt := range tests { require.NoError(t, tk.Session().PrepareTxnCtx(context.TODO())) - require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO())) + require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO(), nil)) comment := fmt.Sprintf("case:%v sql:%s", i, tt.sql) stmt, err := par.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) diff --git a/planner/implementation/BUILD.bazel b/planner/implementation/BUILD.bazel index a7fd495a31e3f..ec042cb40b382 100644 --- a/planner/implementation/BUILD.bazel +++ b/planner/implementation/BUILD.bazel @@ -23,6 +23,7 @@ go_library( go_test( name = "implementation_test", + timeout = "short", srcs = [ "base_test.go", "main_test.go", diff --git a/planner/memo/BUILD.bazel b/planner/memo/BUILD.bazel index 5e6fb48a5f06d..7886691fd3b9c 100644 --- a/planner/memo/BUILD.bazel +++ b/planner/memo/BUILD.bazel @@ -20,6 +20,7 @@ go_library( go_test( name = "memo_test", + timeout = "short", srcs = [ "expr_iterator_test.go", "group_expr_test.go", diff --git a/planner/util/BUILD.bazel b/planner/util/BUILD.bazel index 85359c0e11cef..78f443d4d1fa9 100644 --- a/planner/util/BUILD.bazel +++ b/planner/util/BUILD.bazel @@ -22,6 +22,7 @@ go_library( go_test( name = "util_test", + timeout = "short", srcs = [ "main_test.go", "path_test.go", diff --git a/plugin/BUILD.bazel b/plugin/BUILD.bazel index 85e61355b78fd..1a8623dc4eea9 100644 --- a/plugin/BUILD.bazel +++ b/plugin/BUILD.bazel @@ -37,6 +37,7 @@ go_test( ], embed = [":plugin"], deps = [ + "//kv", "//parser/mysql", "//server", "//session", diff --git a/plugin/integration_test.go b/plugin/integration_test.go index 1c62fa440da6d..84cb8d2076a24 100644 --- a/plugin/integration_test.go +++ b/plugin/integration_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/server" @@ -36,7 +37,7 @@ func TestAuditLogNormal(t *testing.T) { defer clean() sv := server.CreateMockServer(t, store) defer sv.Close() - conn := server.CreateMockConn(t, store, sv) + conn := server.CreateMockConn(t, sv) defer conn.Close() session.DisableStats4Test() session.SetSchemaLease(0) @@ -698,7 +699,8 @@ func 
TestAuditLogNormal(t *testing.T) { testResults = testResults[:0] errMsg := fmt.Sprintf("statement: %s", test.sql) query := append([]byte{mysql.ComQuery}, []byte(test.sql)...) - err := conn.Dispatch(context.Background(), query) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + err := conn.Dispatch(ctx, query) require.NoError(t, err, errMsg) resultCount := test.resCnt if resultCount == 0 { diff --git a/privilege/privileges/BUILD.bazel b/privilege/privileges/BUILD.bazel index fc3084c3ff602..462e8950e5076 100644 --- a/privilege/privileges/BUILD.bazel +++ b/privilege/privileges/BUILD.bazel @@ -12,6 +12,7 @@ go_library( deps = [ "//errno", "//infoschema", + "//kv", "//parser/ast", "//parser/auth", "//parser/mysql", @@ -29,12 +30,14 @@ go_library( "//util/sqlexec", "//util/stringutil", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) go_test( name = "privileges_test", + timeout = "short", srcs = [ "cache_test.go", "main_test.go", diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index d1e85b3d45091..d18a882cdca2c 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -26,6 +26,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" @@ -41,6 +42,7 @@ import ( "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/stringutil" "go.uber.org/zap" + "golang.org/x/exp/slices" ) var ( @@ -566,7 +568,7 @@ func (p *MySQLPrivilege) LoadDefaultRoles(ctx sessionctx.Context) error { func (p *MySQLPrivilege) loadTable(sctx sessionctx.Context, sql string, decodeTableRow func(chunk.Row, []*ast.ResultField) error) error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) rs, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) @@ -1257,7 +1259,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit gs = append(gs, s) } } - sort.Strings(gs[sortFromIdx:]) + slices.Sort(gs[sortFromIdx:]) // Show table scope grants. sortFromIdx = len(gs) @@ -1291,7 +1293,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit gs = append(gs, s) } } - sort.Strings(gs[sortFromIdx:]) + slices.Sort(gs[sortFromIdx:]) // Show column scope grants, column and table are combined. // A map of "DB.Table" => Priv(col1, col2 ...) @@ -1310,7 +1312,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit s := fmt.Sprintf(`GRANT %s ON %s TO '%s'@'%s'`, privCols, k, user, host) gs = append(gs, s) } - sort.Strings(gs[sortFromIdx:]) + slices.Sort(gs[sortFromIdx:]) // Show role grants. graphKey := user + "@" + host @@ -1324,7 +1326,7 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit tmp := fmt.Sprintf("'%s'@'%s'", roleName, roleHost) sortedRes = append(sortedRes, tmp) } - sort.Strings(sortedRes) + slices.Sort(sortedRes) for i, r := range sortedRes { g += r if i != len(sortedRes)-1 { @@ -1370,12 +1372,12 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit // Merge the DYNAMIC privs into a line for non-grantable and then grantable. 
if len(dynamicPrivs) > 0 { - sort.Strings(dynamicPrivs) + slices.Sort(dynamicPrivs) s := fmt.Sprintf("GRANT %s ON *.* TO '%s'@'%s'", strings.Join(dynamicPrivs, ","), user, host) gs = append(gs, s) } if len(grantableDynamicPrivs) > 0 { - sort.Strings(grantableDynamicPrivs) + slices.Sort(grantableDynamicPrivs) s := fmt.Sprintf("GRANT %s ON *.* TO '%s'@'%s' WITH GRANT OPTION", strings.Join(grantableDynamicPrivs, ","), user, host) gs = append(gs, s) } diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index c7d465c52006e..9b159309c0795 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1415,7 +1415,8 @@ func TestMetricsSchema(t *testing.T) { Hostname: "localhost", }, nil, nil) - rs, err := tk.Session().ExecuteInternal(context.Background(), test.stmt) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) + rs, err := tk.Session().ExecuteInternal(ctx, test.stmt) if err == nil { _, err = session.GetRows4Test(context.Background(), tk.Session(), rs) } @@ -1891,33 +1892,34 @@ func TestSecurityEnhancedLocalBackupRestore(t *testing.T) { Hostname: "localhost", }, nil, nil) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Prior to SEM nolocal has permission, the error should be because backup requires tikv - _, err := tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'Local:///tmp/test';") + _, err := tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'Local:///tmp/test';") require.EqualError(t, err, "BACKUP requires tikv store, not unistore") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") require.EqualError(t, err, "RESTORE requires tikv store, not unistore") sem.Enable() defer sem.Disable() // With SEM enabled nolocal does not have permission, but yeslocal does. 
- _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'local:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'local:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'file:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'file:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO '/tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO '/tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'LOCAl:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'local storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "BACKUP DATABASE * TO 'hdfs:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "BACKUP DATABASE * TO 'hdfs:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'hdfs storage' is not supported when security enhanced mode is enabled") - _, err = tk.Session().ExecuteInternal(context.Background(), "RESTORE DATABASE * FROM 'HDFS:///tmp/test';") + _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'HDFS:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'hdfs storage' is not supported when security enhanced mode is enabled") } diff --git a/server/BUILD.bazel b/server/BUILD.bazel index 7a8b99826ef04..1093d540550e8 100644 --- a/server/BUILD.bazel +++ b/server/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//session/txninfo", "//sessionctx", "//sessionctx/binloginfo", + "//sessionctx/sessionstates", "//sessionctx/stmtctx", "//sessionctx/variable", "//sessiontxn", @@ -115,6 +116,7 @@ go_library( go_test( name = "server_test", + timeout = "short", srcs = [ "column_test.go", "conn_stmt_test.go", diff --git a/server/conn_test.go b/server/conn_test.go index 0307e6a76bae3..f9661226ae1c3 100644 --- a/server/conn_test.go +++ b/server/conn_test.go @@ -686,14 +686,15 @@ func TestConnExecutionTimeout(t *testing.T) { // There is no underlying netCon, use failpoint to avoid panic require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/server/FakeClientConn", "return(1)")) - - se, err := session.CreateSession4Test(store) - require.NoError(t, err) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/server/FakeClientConn")) + }() + tk := testkit.NewTestKit(t, store) connID := uint64(1) - se.SetConnectionID(connID) + tk.Session().SetConnectionID(connID) tc := &TiDBContext{ - Session: se, + Session: tk.Session(), stmts: make(map[int]*TiDBStatement), } cc := &clientConn{ @@ -714,46 +715,32 @@ func TestConnExecutionTimeout(t *testing.T) { handle := dom.ExpensiveQueryHandle().SetSessionManager(srv) go handle.Run() - _, err = se.Execute(context.Background(), "use test;") - require.NoError(t, err) - _, err = se.Execute(context.Background(), "CREATE TABLE testTable2 (id bigint PRIMARY KEY, age int)") - require.NoError(t, err) + 
tk.MustExec("use test;") + tk.MustExec("CREATE TABLE testTable2 (id bigint PRIMARY KEY, age int)") for i := 0; i < 10; i++ { str := fmt.Sprintf("insert into testTable2 values(%d, %d)", i, i%80) - _, err = se.Execute(context.Background(), str) - require.NoError(t, err) + tk.MustExec(str) } - _, err = se.Execute(context.Background(), "select SLEEP(1);") - require.NoError(t, err) - - _, err = se.Execute(context.Background(), "set @@max_execution_time = 500;") - require.NoError(t, err) - - err = cc.handleQuery(context.Background(), "select * FROM testTable2 WHERE SLEEP(1);") - require.NoError(t, err) + tk.MustExec("select SLEEP(1);") + tk.MustExec("set @@max_execution_time = 500;") + tk.MustQuery("select * FROM testTable2 WHERE SLEEP(1);") + tk.MustExec("set @@max_execution_time = 1500;") + tk.MustExec("set @@tidb_expensive_query_time_threshold = 1;") - _, err = se.Execute(context.Background(), "set @@max_execution_time = 1500;") + records, err := tk.Exec("select SLEEP(2);") require.NoError(t, err) + tk1 := testkit.NewTestKit(t, store) + tk1.ResultSetToResult(records, fmt.Sprintf("%v", records)).Check(testkit.Rows("1")) + require.NoError(t, records.Close()) - _, err = se.Execute(context.Background(), "set @@tidb_expensive_query_time_threshold = 1;") - require.NoError(t, err) - - records, err := se.Execute(context.Background(), "select SLEEP(2);") - require.NoError(t, err) - tk := testkit.NewTestKit(t, store) - tk.ResultSetToResult(records[0], fmt.Sprintf("%v", records[0])).Check(testkit.Rows("1")) - - _, err = se.Execute(context.Background(), "set @@max_execution_time = 0;") - require.NoError(t, err) + tk.MustExec("set @@max_execution_time = 0;") err = cc.handleQuery(context.Background(), "select * FROM testTable2 WHERE SLEEP(1);") require.NoError(t, err) err = cc.handleQuery(context.Background(), "select /*+ MAX_EXECUTION_TIME(100)*/ * FROM testTable2 WHERE SLEEP(1);") require.NoError(t, err) - - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/server/FakeClientConn")) } func TestShutDown(t *testing.T) { diff --git a/server/driver.go b/server/driver.go index 0363758e3e47b..ae996715113ae 100644 --- a/server/driver.go +++ b/server/driver.go @@ -72,6 +72,8 @@ type ResultSet interface { StoreFetchedRows(rows []chunk.Row) GetFetchedRows() []chunk.Row Close() error + // IsClosed checks whether the result set is closed. + IsClosed() bool } // fetchNotifier represents notifier will be called in COM_FETCH. diff --git a/server/driver_tidb.go b/server/driver_tidb.go index 4fd75ba43b01a..bd96059a30ca4 100644 --- a/server/driver_tidb.go +++ b/server/driver_tidb.go @@ -17,6 +17,8 @@ package server import ( "context" "crypto/tls" + "fmt" + "strings" "sync/atomic" "github.com/pingcap/errors" @@ -27,6 +29,8 @@ import ( "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/sessionstates" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -50,8 +54,7 @@ func NewTiDBDriver(store kv.Storage) *TiDBDriver { // TiDBContext implements QueryCtx. type TiDBContext struct { session.Session - currentDB string - stmts map[int]*TiDBStatement + stmts map[int]*TiDBStatement } // TiDBStatement implements PreparedStatement. 
@@ -199,10 +202,10 @@ func (qd *TiDBDriver) OpenCtx(connID uint64, capability uint32, collation uint8, se.SetClientCapability(capability) se.SetConnectionID(connID) tc := &TiDBContext{ - Session: se, - currentDB: dbname, - stmts: make(map[int]*TiDBStatement), + Session: se, + stmts: make(map[int]*TiDBStatement), } + se.SetSessionStatesHandler(sessionstates.StatePrepareStmt, tc) return tc, nil } @@ -211,11 +214,6 @@ func (tc *TiDBContext) GetWarnings() []stmtctx.SQLWarn { return tc.GetSessionVars().StmtCtx.GetWarnings() } -// CurrentDB implements QueryCtx CurrentDB method. -func (tc *TiDBContext) CurrentDB() string { - return tc.currentDB -} - // WarningCount implements QueryCtx WarningCount method. func (tc *TiDBContext) WarningCount() uint16 { return tc.GetSessionVars().StmtCtx.WarningCount() @@ -308,6 +306,89 @@ func (tc *TiDBContext) GetStmtStats() *stmtstats.StatementStats { return tc.Session.GetStmtStats() } +// EncodeSessionStates implements SessionStatesHandler.EncodeSessionStates interface. +func (tc *TiDBContext) EncodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + sessionVars := tc.Session.GetSessionVars() + sessionStates.PreparedStmts = make(map[uint32]*sessionstates.PreparedStmtInfo, len(sessionVars.PreparedStmts)) + for preparedID, preparedObj := range sessionVars.PreparedStmts { + preparedStmt, ok := preparedObj.(*core.CachedPrepareStmt) + if !ok { + return errors.Errorf("invalid CachedPreparedStmt type") + } + sessionStates.PreparedStmts[preparedID] = &sessionstates.PreparedStmtInfo{ + StmtText: preparedStmt.StmtText, + StmtDB: preparedStmt.StmtDB, + } + } + for name, id := range sessionVars.PreparedStmtNameToID { + // Only text protocol statements have names. + if preparedStmtInfo, ok := sessionStates.PreparedStmts[id]; ok { + preparedStmtInfo.Name = name + } + } + for id, stmt := range tc.stmts { + // Only binary protocol statements have paramTypes. + preparedStmtInfo, ok := sessionStates.PreparedStmts[uint32(id)] + if !ok { + return errors.Errorf("prepared statement %d not found", id) + } + // Bound params are sent by CMD_STMT_SEND_LONG_DATA, the proxy can wait for COM_STMT_EXECUTE. + for _, boundParam := range stmt.BoundParams() { + if boundParam != nil { + return session.ErrCannotMigrateSession.GenWithStackByArgs("prepared statements have bound params") + } + } + if rs := stmt.GetResultSet(); rs != nil && !rs.IsClosed() { + return session.ErrCannotMigrateSession.GenWithStackByArgs("prepared statements have open result sets") + } + preparedStmtInfo.ParamTypes = stmt.GetParamsType() + } + return nil +} + +// DecodeSessionStates implements SessionStatesHandler.DecodeSessionStates interface. +func (tc *TiDBContext) DecodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + if len(sessionStates.PreparedStmts) == 0 { + return nil + } + sessionVars := tc.Session.GetSessionVars() + savedPreparedStmtID := sessionVars.GetNextPreparedStmtID() + savedCurrentDB := sessionVars.CurrentDB + defer func() { + sessionVars.SetNextPreparedStmtID(savedPreparedStmtID - 1) + sessionVars.CurrentDB = savedCurrentDB + }() + + for id, preparedStmtInfo := range sessionStates.PreparedStmts { + // Set the next id and currentDB manually. + sessionVars.SetNextPreparedStmtID(id - 1) + sessionVars.CurrentDB = preparedStmtInfo.StmtDB + if preparedStmtInfo.Name == "" { + // Binary protocol: add to sessionVars.PreparedStmts and TiDBContext.stmts. 
+ stmt, _, _, err := tc.Prepare(preparedStmtInfo.StmtText) + if err != nil { + return err + } + // Only binary protocol uses paramsType, which is passed from the first COM_STMT_EXECUTE. + stmt.SetParamsType(preparedStmtInfo.ParamTypes) + } else { + // Text protocol: add to sessionVars.PreparedStmts and sessionVars.PreparedStmtNameToID. + stmtText := strings.ReplaceAll(preparedStmtInfo.StmtText, "\\", "\\\\") + stmtText = strings.ReplaceAll(stmtText, "'", "\\'") + // Add single quotes because the sql_mode might contain ANSI_QUOTES. + sql := fmt.Sprintf("PREPARE `%s` FROM '%s'", preparedStmtInfo.Name, stmtText) + stmts, err := tc.Parse(ctx, sql) + if err != nil { + return err + } + if _, err = tc.ExecuteStmt(ctx, stmts[0]); err != nil { + return err + } + } + } + return nil +} + type tidbResultSet struct { recordSet sqlexec.RecordSet columns []*ColumnInfo @@ -344,6 +425,11 @@ func (trs *tidbResultSet) Close() error { return err } +// IsClosed implements ResultSet.IsClosed interface. +func (trs *tidbResultSet) IsClosed() bool { + return atomic.LoadInt32(&trs.closed) == 1 +} + // OnFetchReturned implements fetchNotifier#OnFetchReturned func (trs *tidbResultSet) OnFetchReturned() { if cl, ok := trs.recordSet.(fetchNotifier); ok { diff --git a/server/http_handler.go b/server/http_handler.go index 5db67bcd3be0c..331f0206ef6dd 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -1001,7 +1001,7 @@ func getSchemaTablesStorageInfo(h *schemaStorageHandler, schema *model.CIStr, ta } defer s.Close() - ctx := s.(sessionctx.Context) + sctx := s.(sessionctx.Context) condition := make([]string, 0) params := make([]interface{}, 0) @@ -1016,17 +1016,19 @@ func getSchemaTablesStorageInfo(h *schemaStorageHandler, schema *model.CIStr, ta sql := `select TABLE_SCHEMA,TABLE_NAME,TABLE_ROWS,AVG_ROW_LENGTH,DATA_LENGTH,MAX_DATA_LENGTH,INDEX_LENGTH,DATA_FREE from INFORMATION_SCHEMA.TABLES` if len(condition) > 0 { + //nolint: gosec sql += ` WHERE ` + strings.Join(condition, ` AND `) } var results sqlexec.RecordSet - if results, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, params...); err != nil { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + if results, err = sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, params...); err != nil { logutil.BgLogger().Error(`ExecuteInternal`, zap.Error(err)) } else if results != nil { messages = make([]*schemaTableStorage, 0) defer terror.Call(results.Close) for { req := results.NewChunk(nil) - if err = results.Next(context.TODO(), req); err != nil { + if err = results.Next(ctx, req); err != nil { break } diff --git a/server/mock_conn.go b/server/mock_conn.go index 7ecb304426aa8..4ef086b01d3b7 100644 --- a/server/mock_conn.go +++ b/server/mock_conn.go @@ -23,8 +23,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/session" + tmysql "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/util/arena" "github.com/pingcap/tidb/util/chunk" "github.com/stretchr/testify/require" @@ -85,18 +84,14 @@ func CreateMockServer(t *testing.T, store kv.Storage) *Server { } // CreateMockConn creates a mock connection together with a session. 
-func CreateMockConn(t *testing.T, store kv.Storage, server *Server) MockConn { - se, err := session.CreateSession4Test(store) +func CreateMockConn(t *testing.T, server *Server) MockConn { + tc, err := server.driver.OpenCtx(uint64(0), 0, uint8(tmysql.DefaultCollationID), "", nil) require.NoError(t, err) - tc := &TiDBContext{ - Session: se, - stmts: make(map[int]*TiDBStatement), - } cc := &clientConn{ server: server, salt: []byte{}, - collation: mysql.DefaultCollationID, + collation: tmysql.DefaultCollationID, alloc: arena.NewAllocator(1024), chunkAlloc: chunk.NewAllocator(), pkt: &packetIO{ diff --git a/server/mock_conn_test.go b/server/mock_conn_test.go index 8bcadd0da1c06..576dcf5959ee2 100644 --- a/server/mock_conn_test.go +++ b/server/mock_conn_test.go @@ -28,7 +28,7 @@ func TestMockConn(t *testing.T) { defer clean() server := CreateMockServer(t, store) defer server.Close() - conn := CreateMockConn(t, store, server) + conn := CreateMockConn(t, server) defer conn.Close() require.NoError(t, conn.HandleQuery(context.Background(), "select 1")) diff --git a/server/plan_replayer.go b/server/plan_replayer.go index b783fad87f66f..39938e867f32e 100644 --- a/server/plan_replayer.go +++ b/server/plan_replayer.go @@ -81,6 +81,7 @@ func handleDownloadFile(handler downloadFileHandler, w http.ResponseWriter, req return } if exist { + //nolint: gosec file, err := os.Open(path) if err != nil { writeError(w, err) diff --git a/server/server.go b/server/server.go index 13ed052391f59..dde54399a7cd9 100644 --- a/server/server.go +++ b/server/server.go @@ -36,8 +36,7 @@ import ( "io" "math/rand" "net" - "net/http" - + "net/http" //nolint:goimports // For pprof _ "net/http/pprof" // #nosec G108 "os" @@ -50,6 +49,7 @@ import ( "github.com/blacktear23/go-proxyprotocol" "github.com/pingcap/errors" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" @@ -718,6 +718,25 @@ func killConn(conn *clientConn) { conn.mu.RLock() cancelFunc := conn.mu.cancelFunc conn.mu.RUnlock() + + // If the connection being killed is a DDL Job, + // we need to CANCEL the matching jobID first. 
+ if sessVars.StmtCtx.IsDDLJobInQueue { + jobID := sessVars.StmtCtx.DDLJobID + err := kv.RunInNewTxn(context.Background(), conn.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + // errs is the error per job, there is only one submitted + // err is the error of the overall task + errs, err := ddl.CancelJobs(txn, []int64{jobID}) + if len(errs) > 0 { + logutil.BgLogger().Warn("error canceling DDL job", zap.Error(errs[0])) + } + return err + }) + if err != nil { + logutil.BgLogger().Warn("could not cancel DDL job", zap.Error(err)) + } + } + if cancelFunc != nil { cancelFunc() } diff --git a/session/BUILD.bazel b/session/BUILD.bazel index a48f8238cc5ca..b75f26dc03634 100644 --- a/session/BUILD.bazel +++ b/session/BUILD.bazel @@ -49,7 +49,6 @@ go_library( "//sessionctx/variable", "//sessiontxn", "//sessiontxn/isolation", - "//sessiontxn/legacy", "//sessiontxn/staleread", "//statistics", "//statistics/handle", @@ -101,6 +100,7 @@ go_library( go_test( name = "session_test", + timeout = "short", srcs = [ "bench_test.go", "bootstrap_test.go", @@ -134,6 +134,7 @@ go_test( "//planner/core", "//sessionctx", "//sessionctx/variable", + "//sessiontxn", "//statistics", "//store/mockstore", "//table", diff --git a/session/advisory_locks.go b/session/advisory_locks.go index aca6914de2029..f51bb061a119c 100644 --- a/session/advisory_locks.go +++ b/session/advisory_locks.go @@ -17,6 +17,7 @@ package session import ( "context" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/terror" ) @@ -67,6 +68,7 @@ func (a *advisoryLock) Close() { // We will never COMMIT the transaction, but the err indicates // if the lock was successfully acquired. func (a *advisoryLock) GetLock(lockName string, timeout int64) error { + a.ctx = kv.WithInternalSourceType(a.ctx, kv.InternalTxnOthers) _, err := a.session.ExecuteInternal(a.ctx, "SET innodb_lock_wait_timeout = %?", timeout) if err != nil { return err diff --git a/session/bench_test.go b/session/bench_test.go index c1164ec32de40..f66838c0598ba 100644 --- a/session/bench_test.go +++ b/session/bench_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/benchdaily" @@ -1812,8 +1813,9 @@ func BenchmarkCompileExecutePreparedStmt(b *testing.B) { is := se.GetInfoSchema() b.ResetTimer() + stmtExec := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: args} for i := 0; i < b.N; i++ { - _, _, _, err := executor.CompileExecutePreparedStmt(context.Background(), se, stmtID, is.(infoschema.InfoSchema), 0, kv.GlobalTxnScope, args) + _, _, _, err := executor.CompileExecutePreparedStmt(context.Background(), se, stmtExec, is.(infoschema.InfoSchema)) if err != nil { b.Fatal(err) } diff --git a/session/bootstrap.go b/session/bootstrap.go index d63181829993d..e9db7a08c3ed7 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" @@ -722,8 +723,9 @@ var ( ) func checkBootstrapped(s Session) (bool, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) // Check if system db exists. 
- _, err := s.ExecuteInternal(context.Background(), "USE %n", mysql.SystemDB) + _, err := s.ExecuteInternal(ctx, "USE %n", mysql.SystemDB) if err != nil && infoschema.ErrDatabaseNotExists.NotEqual(err) { logutil.BgLogger().Fatal("check bootstrap error", zap.Error(err)) @@ -739,7 +741,7 @@ func checkBootstrapped(s Session) (bool, error) { isBootstrapped := sVal == varTrue if isBootstrapped { // Make sure that doesn't affect the following operations. - if err = s.CommitTxn(context.Background()); err != nil { + if err = s.CommitTxn(ctx); err != nil { return false, errors.Trace(err) } } @@ -749,7 +751,7 @@ func checkBootstrapped(s Session) (bool, error) { // getTiDBVar gets variable value from mysql.tidb table. // Those variables are used by TiDB server. func getTiDBVar(s Session, name string) (sVal string, isNull bool, e error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, `SELECT HIGH_PRIORITY VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME= %?`, mysql.SystemDB, mysql.TiDBTable, @@ -789,7 +791,8 @@ func upgrade(s Session) { } updateBootstrapVer(s) - _, err = s.ExecuteInternal(context.Background(), "COMMIT") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, "COMMIT") if err != nil { sleepTime := 1 * time.Second @@ -877,8 +880,9 @@ func upgradeToVer8(s Session, ver int64) { if ver >= version8 { return } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) // This is a dummy upgrade, it checks whether upgradeToVer7 success, if not, do it again. - if _, err := s.ExecuteInternal(context.Background(), "SELECT HIGH_PRIORITY `Process_priv` FROM mysql.user LIMIT 0"); err == nil { + if _, err := s.ExecuteInternal(ctx, "SELECT HIGH_PRIORITY `Process_priv` FROM mysql.user LIMIT 0"); err == nil { return } upgradeToVer7(s, ver) @@ -894,7 +898,8 @@ func upgradeToVer9(s Session, ver int64) { } func doReentrantDDL(s Session, sql string, ignorableErrs ...error) { - _, err := s.ExecuteInternal(context.Background(), sql) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, sql) for _, ignorableErr := range ignorableErrs { if terror.ErrorEqual(err, ignorableErr) { return @@ -920,7 +925,8 @@ func upgradeToVer11(s Session, ver int64) { if ver >= version11 { return } - _, err := s.ExecuteInternal(context.Background(), "ALTER TABLE mysql.user ADD COLUMN `References_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Grant_priv`") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, "ALTER TABLE mysql.user ADD COLUMN `References_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Grant_priv`") if err != nil { if terror.ErrorEqual(err, infoschema.ErrColumnExists) { return @@ -934,7 +940,7 @@ func upgradeToVer12(s Session, ver int64) { if ver >= version12 { return } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) _, err := s.ExecuteInternal(ctx, "BEGIN") terror.MustNil(err) sql := "SELECT HIGH_PRIORITY user, host, password FROM mysql.user WHERE password != ''" @@ -988,7 +994,7 @@ func upgradeToVer13(s Session, ver int64) { "ALTER TABLE mysql.user ADD COLUMN `Alter_routine_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Create_routine_priv`", "ALTER TABLE mysql.user ADD COLUMN `Event_priv` 
ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Create_user_priv`", } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) for _, sql := range sqls { _, err := s.ExecuteInternal(ctx, sql) if err != nil { @@ -1017,7 +1023,7 @@ func upgradeToVer14(s Session, ver int64) { "ALTER TABLE mysql.db ADD COLUMN `Event_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Execute_priv`", "ALTER TABLE mysql.db ADD COLUMN `Trigger_priv` ENUM('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N' AFTER `Event_priv`", } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) for _, sql := range sqls { _, err := s.ExecuteInternal(ctx, sql) if err != nil { @@ -1034,7 +1040,8 @@ func upgradeToVer15(s Session, ver int64) { return } var err error - _, err = s.ExecuteInternal(context.Background(), CreateGCDeleteRangeTable) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, CreateGCDeleteRangeTable) if err != nil { logutil.BgLogger().Fatal("upgradeToVer15 error", zap.Error(err)) } @@ -1239,7 +1246,8 @@ func upgradeToVer38(s Session, ver int64) { return } var err error - _, err = s.ExecuteInternal(context.Background(), CreateGlobalPrivTable) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err = s.ExecuteInternal(ctx, CreateGlobalPrivTable) if err != nil { logutil.BgLogger().Fatal("upgradeToVer38 error", zap.Error(err)) } @@ -1407,7 +1415,7 @@ func upgradeToVer55(s Session, ver int64) { } selectSQL := "select HIGH_PRIORITY * from mysql.global_variables where variable_name in ('" + strings.Join(names, quoteCommaQuote) + "')" - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, selectSQL) terror.MustNil(err) defer terror.Call(rs.Close) @@ -1513,8 +1521,9 @@ func upgradeToVer67(s Session, ver int64) { mustExecute(s, "COMMIT") }() mustExecute(s, h.LockBindInfoSQL()) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) var rs sqlexec.RecordSet - rs, err = s.ExecuteInternal(context.Background(), + rs, err = s.ExecuteInternal(ctx, `SELECT bind_sql, default_db, status, create_time, charset, collation, source FROM mysql.bind_info WHERE source != 'builtin' @@ -1733,7 +1742,7 @@ func upgradeToVer80(s Session, ver int64) { } // Check if tidb_analyze_version exists in mysql.GLOBAL_VARIABLES. // If not, insert "tidb_analyze_version | 1" since this is the old behavior before we introduce this variable. - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;", mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion) terror.MustNil(err) @@ -1756,7 +1765,7 @@ func upgradeToVer81(s Session, ver int64) { } // Check if tidb_enable_index_merge exists in mysql.GLOBAL_VARIABLES. // If not, insert "tidb_enable_index_merge | off". 
- ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;", mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBEnableIndexMerge) terror.MustNil(err) @@ -2046,7 +2055,7 @@ func doDMLWorks(s Session) { case variable.TiDBEnablePaging: vVal = variable.BoolToOnOff(variable.DefTiDBEnablePaging) } - value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), vVal) + value := fmt.Sprintf(`("%s", "%s")`, k, vVal) values = append(values, value) } sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, @@ -2069,7 +2078,8 @@ func doDMLWorks(s Session) { writeStmtSummaryVars(s) - _, err := s.ExecuteInternal(context.Background(), "COMMIT") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, "COMMIT") if err != nil { sleepTime := 1 * time.Second logutil.BgLogger().Info("doDMLWorks failed", zap.Error(err), zap.Duration("sleeping time", sleepTime)) @@ -2087,7 +2097,8 @@ func doDMLWorks(s Session) { } func mustExecute(s Session, sql string, args ...interface{}) { - _, err := s.ExecuteInternal(context.Background(), sql, args...) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + _, err := s.ExecuteInternal(ctx, sql, args...) if err != nil { debug.PrintStack() logutil.BgLogger().Fatal("mustExecute error", zap.Error(err)) diff --git a/session/nontransactional.go b/session/nontransactional.go index a637a93f1e9b1..d7fb53e085785 100644 --- a/session/nontransactional.go +++ b/session/nontransactional.go @@ -346,7 +346,7 @@ func doOneJob(ctx context.Context, job *job, totalJobCount int, options statemen zap.Int("jobSize", job.jobSize), zap.String("deleteSQL", deleteSQLInLog)) } if rs != nil { - rs.Close() + _ = rs.Close() } return "" } @@ -377,7 +377,9 @@ func buildShardJobs(ctx context.Context, stmt *ast.NonTransactionalDeleteStmt, s return nil, errors.Errorf("Non-transactional delete, expecting 1 record set, but got %d", len(rss)) } rs := rss[0] - defer rs.Close() + defer func() { + _ = rs.Close() + }() batchSize := int(stmt.Limit) if batchSize <= 0 { diff --git a/session/schema_amender_test.go b/session/schema_amender_test.go index a5dbb039624fd..2f07dbe395905 100644 --- a/session/schema_amender_test.go +++ b/session/schema_amender_test.go @@ -17,15 +17,18 @@ package session import ( "bytes" "context" + "fmt" "strconv" "testing" "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" @@ -254,6 +257,8 @@ func TestAmendCollectAndGenMutations(t *testing.T) { store: store, sessionVars: variable.NewSessionVars(), } + se.mu.values = make(map[fmt.Stringer]interface{}) + domain.BindDomain(se, &domain.Domain{}) startStates := []model.SchemaState{model.StateNone, model.StateDeleteOnly, model.StateWriteOnly, model.StateWriteReorganization} for _, startState := range startStates { endStatMap := ConstOpAddIndex[startState] @@ -404,10 +409,9 @@ func TestAmendCollectAndGenMutations(t *testing.T) { logutil.BgLogger().Info("[TEST]finish to write old txn data") // Write data for 
this new transaction, its memory buffer will be used by schema amender. - txn, err := se.store.Begin() + err = sessiontxn.NewTxn(ctx, se) require.NoError(t, err) - se.txn.changeInvalidToValid(txn) - txn, err = se.Txn(true) + txn, err := se.Txn(false) require.NoError(t, err) var checkKeys []kv.Key for i, key := range mutations.GetKeys() { diff --git a/session/session.go b/session/session.go index d01c3e7e549d3..e751d9443e458 100644 --- a/session/session.go +++ b/session/session.go @@ -25,6 +25,7 @@ import ( "encoding/hex" "encoding/json" stderrs "errors" + "flag" "fmt" "math/rand" "runtime/pprof" @@ -40,30 +41,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/auth" - "github.com/pingcap/tidb/parser/charset" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/sessionctx/sessionstates" - "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/legacy" - "github.com/pingcap/tidb/sessiontxn/staleread" - "github.com/pingcap/tidb/store/driver/txn" - "github.com/pingcap/tidb/store/helper" - "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/table/temptable" - "github.com/pingcap/tidb/util/logutil/consistency" - "github.com/pingcap/tidb/util/sem" - "github.com/pingcap/tidb/util/topsql" - topsqlstate "github.com/pingcap/tidb/util/topsql/state" - "github.com/pingcap/tidb/util/topsql/stmtstats" - "github.com/pingcap/tipb/go-binlog" - tikverr "github.com/tikv/client-go/v2/error" - "go.uber.org/zap" - "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" @@ -75,6 +52,13 @@ import ( "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/owner" + "github.com/pingcap/tidb/parser" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/auth" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/planner" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/plugin" @@ -83,11 +67,18 @@ import ( "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/binloginfo" + "github.com/pingcap/tidb/sessionctx/sessionstates" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" storeerr "github.com/pingcap/tidb/store/driver/error" + "github.com/pingcap/tidb/store/driver/txn" + "github.com/pingcap/tidb/store/helper" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/telemetry" "github.com/pingcap/tidb/types" @@ -98,14 +89,22 @@ import ( "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/logutil/consistency" + "github.com/pingcap/tidb/util/sem" "github.com/pingcap/tidb/util/sli" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/tableutil" "github.com/pingcap/tidb/util/timeutil" + 
"github.com/pingcap/tidb/util/topsql" + topsqlstate "github.com/pingcap/tidb/util/topsql/state" + "github.com/pingcap/tidb/util/topsql/stmtstats" + "github.com/pingcap/tipb/go-binlog" + tikverr "github.com/tikv/client-go/v2/error" tikvstore "github.com/tikv/client-go/v2/kv" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" tikvutil "github.com/tikv/client-go/v2/util" + "go.uber.org/zap" ) var ( @@ -123,7 +122,8 @@ var ( sessionExecuteParseDurationInternal = metrics.SessionExecuteParseDuration.WithLabelValues(metrics.LblInternal) sessionExecuteParseDurationGeneral = metrics.SessionExecuteParseDuration.WithLabelValues(metrics.LblGeneral) - telemetryCTEUsage = metrics.TelemetrySQLCTECnt + telemetryCTEUsage = metrics.TelemetrySQLCTECnt + telemetryMultiSchemaChangeUsage = metrics.TelemetryMultiSchemaChangeCnt ) // Session context, it is consistent with the lifecycle of a client connection. @@ -149,6 +149,8 @@ type Session interface { // ExecutePreparedStmt executes a prepared statement. ExecutePreparedStmt(ctx context.Context, stmtID uint32, param []types.Datum) (sqlexec.RecordSet, error) DropPreparedStmt(stmtID uint32) error + // SetSessionStatesHandler sets SessionStatesHandler for type stateType. + SetSessionStatesHandler(stateType sessionstates.SessionStateType, handler sessionctx.SessionStatesHandler) SetClientCapability(uint32) // Set client capability flags. SetConnectionID(uint64) SetCommandValue(byte) @@ -249,6 +251,9 @@ type session struct { // regularly. stmtStats *stmtstats.StatementStats + // Used to encode and decode each type of session states. + sessionStatesHandlers map[sessionstates.SessionStateType]sessionctx.SessionStatesHandler + // Contains a list of sessions used to collect advisory locks. advisoryLocks map[string]*advisoryLock } @@ -1112,7 +1117,7 @@ func (s *session) retry(ctx context.Context, maxCnt uint) (err error) { } _, digest := s.sessionVars.StmtCtx.SQLDigest() s.txn.onStmtStart(digest.String()) - if err = sessiontxn.GetTxnManager(s).OnStmtStart(ctx); err == nil { + if err = sessiontxn.GetTxnManager(s).OnStmtStart(ctx, st.GetStmtNode()); err == nil { _, err = st.Exec(ctx) } s.txn.onStmtEnd() @@ -1248,6 +1253,9 @@ func drainRecordSet(ctx context.Context, se *session, rs sqlexec.RecordSet, allo // getTableValue executes restricted sql and the result is one column. // It returns a string value. func (s *session) getTableValue(ctx context.Context, tblName string, varName string) (string, error) { + if ctx.Value(kv.RequestSourceKey) == nil { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnSysVar) + } rows, fields, err := s.ExecRestrictedSQL(ctx, nil, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?", mysql.SystemDB, tblName, varName) if err != nil { return "", err @@ -1266,6 +1274,7 @@ func (s *session) getTableValue(ctx context.Context, tblName string, varName str // replaceGlobalVariablesTableValue executes restricted sql updates the variable value // It will then notify the etcd channel that the value has changed. 
func (s *session) replaceGlobalVariablesTableValue(ctx context.Context, varName, val string) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnSysVar) _, _, err := s.ExecRestrictedSQL(ctx, nil, `REPLACE INTO %n.%n (variable_name, variable_value) VALUES (%?, %?)`, mysql.SystemDB, mysql.GlobalVariablesTable, varName, val) if err != nil { return err @@ -1353,7 +1362,8 @@ func (s *session) SetGlobalSysVarOnly(name, value string) (err error) { // SetTiDBTableValue implements GlobalVarAccessor.SetTiDBTableValue interface. func (s *session) SetTiDBTableValue(name, value, comment string) error { - _, _, err := s.ExecRestrictedSQL(context.TODO(), nil, `REPLACE INTO mysql.tidb (variable_name, variable_value, comment) VALUES (%?, %?, %?)`, name, value, comment) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnSysVar) + _, _, err := s.ExecRestrictedSQL(ctx, nil, `REPLACE INTO mysql.tidb (variable_name, variable_value, comment) VALUES (%?, %?, %?)`, name, value, comment) return err } @@ -1726,6 +1736,7 @@ func (s *session) ExecRestrictedStmt(ctx context.Context, stmtNode ast.StmtNode, func ExecRestrictedStmt4Test(ctx context.Context, s Session, stmtNode ast.StmtNode, opts ...sqlexec.OptionFuncAlias) ( []chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers) return s.(*session).ExecRestrictedStmt(ctx, stmtNode, opts...) } @@ -1918,7 +1929,7 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex s.txn.onStmtStart(digest.String()) defer s.txn.onStmtEnd() - if err := s.onTxnManagerStmtStartOrRetry(ctx); err != nil { + if err := s.onTxnManagerStmtStartOrRetry(ctx, stmtNode); err != nil { return nil, err } @@ -1929,6 +1940,9 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex } }) + stmtLabel := executor.GetStmtLabel(stmtNode) + s.setRequestSource(ctx, stmtLabel, stmtNode) + // Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). 
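// Before compiling, ExecuteStmt now derives a label for the statement
// (executor.GetStmtLabel) and calls setRequestSource, which tags either the
// already-active transaction or the session variables with that label; internal
// sessions instead take the type recorded in the context (see the helper near the
// end of session.go in this diff). A rough, self-contained sketch of that branch,
// with hypothetical session/txn types, is shown below.
package main

import (
	"context"
	"fmt"
)

type fakeTxn struct {
	valid      bool
	sourceType string
}

type fakeSession struct {
	internal          bool
	txn               *fakeTxn
	requestSourceType string // mirrors sessionVars.RequestSourceType
}

type ctxKey struct{}

func (s *fakeSession) setRequestSource(ctx context.Context, stmtLabel string) {
	if !s.internal {
		// External statements: prefer the active transaction, fall back to session vars.
		if s.txn != nil && s.txn.valid {
			s.txn.sourceType = stmtLabel
			return
		}
		s.requestSourceType = stmtLabel
		return
	}
	// Internal statements: the source type should already be in the context.
	if v := ctx.Value(ctxKey{}); v != nil {
		s.requestSourceType = v.(string)
	}
}

func main() {
	s := &fakeSession{txn: &fakeTxn{valid: true}}
	s.setRequestSource(context.Background(), "Select")
	fmt.Println(s.txn.sourceType) // Select

	internal := &fakeSession{internal: true}
	ctx := context.WithValue(context.Background(), ctxKey{}, "bootstrap")
	internal.setRequestSource(ctx, "Select")
	fmt.Println(internal.requestSourceType) // bootstrap
}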
compiler := executor.Compiler{Ctx: s} stmt, err := compiler.Compile(ctx, stmtNode) @@ -1981,11 +1995,11 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex return recordSet, nil } -func (s *session) onTxnManagerStmtStartOrRetry(ctx context.Context) error { +func (s *session) onTxnManagerStmtStartOrRetry(ctx context.Context, node ast.StmtNode) error { if s.sessionVars.RetryInfo.Retrying { return sessiontxn.GetTxnManager(s).OnStmtRetry(ctx) } - return sessiontxn.GetTxnManager(s).OnStmtStart(ctx) + return sessiontxn.GetTxnManager(s).OnStmtStart(ctx, node) } func (s *session) validateStatementReadOnlyInStaleness(stmtNode ast.StmtNode) error { @@ -2202,7 +2216,8 @@ func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields return } - if err = s.onTxnManagerStmtStartOrRetry(ctx); err != nil { + prepareStmt := &ast.PrepareStmt{SQLText: sql} + if err = s.onTxnManagerStmtStartOrRetry(ctx, prepareStmt); err != nil { return } @@ -2221,19 +2236,21 @@ func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields return prepareExec.ID, prepareExec.ParamCount, prepareExec.Fields, nil } -func (s *session) preparedStmtExec(ctx context.Context, - is infoschema.InfoSchema, snapshotTS uint64, - stmtID uint32, prepareStmt *plannercore.CachedPrepareStmt, replicaReadScope string, args []types.Datum) (sqlexec.RecordSet, error) { - +func (s *session) preparedStmtExec(ctx context.Context, execStmt *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt) (sqlexec.RecordSet, error) { failpoint.Inject("assertTxnManagerInPreparedStmtExec", func() { sessiontxn.RecordAssert(s, "assertTxnManagerInPreparedStmtExec", true) - sessiontxn.AssertTxnManagerInfoSchema(s, is) - if snapshotTS != 0 { - sessiontxn.AssertTxnManagerReadTS(s, snapshotTS) + if prepareStmt.SnapshotTSEvaluator != nil { + staleread.AssertStmtStaleness(s, true) + ts, err := prepareStmt.SnapshotTSEvaluator(s) + if err != nil { + panic(err) + } + sessiontxn.AssertTxnManagerReadTS(s, ts) } }) - st, tiFlashPushDown, tiFlashExchangePushDown, err := executor.CompileExecutePreparedStmt(ctx, s, stmtID, is, snapshotTS, replicaReadScope, args) + is := sessiontxn.GetTxnManager(s).GetTxnInfoSchema() + st, tiFlashPushDown, tiFlashExchangePushDown, err := executor.CompileExecutePreparedStmt(ctx, s, execStmt, is) if err != nil { return nil, err } @@ -2253,23 +2270,17 @@ func (s *session) preparedStmtExec(ctx context.Context, // cachedPointPlanExec is a short path currently ONLY for cached "point select plan" execution func (s *session) cachedPointPlanExec(ctx context.Context, - is infoschema.InfoSchema, stmtID uint32, prepareStmt *plannercore.CachedPrepareStmt, replicaReadScope string, args []types.Datum) (sqlexec.RecordSet, bool, error) { + execAst *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt) (sqlexec.RecordSet, bool, error) { prepared := prepareStmt.PreparedAst - // compile ExecStmt - execAst := &ast.ExecuteStmt{ExecID: stmtID} - if err := executor.ResetContextOfStmt(s, execAst); err != nil { - return nil, false, err - } failpoint.Inject("assertTxnManagerInCachedPlanExec", func() { sessiontxn.RecordAssert(s, "assertTxnManagerInCachedPlanExec", true) - sessiontxn.AssertTxnManagerInfoSchema(s, is) // stale read should not reach here staleread.AssertStmtStaleness(s, false) }) - execAst.BinaryArgs = args + is := sessiontxn.GetTxnManager(s).GetTxnInfoSchema() execPlan, err := planner.OptimizeExecStmt(ctx, s, execAst, is) if err != nil { return nil, false, err @@ -2281,15 +2292,14 @@ 
func (s *session) cachedPointPlanExec(ctx context.Context, stmtCtx := s.GetSessionVars().StmtCtx stmt := &executor.ExecStmt{ - GoCtx: ctx, - InfoSchema: is, - Plan: execPlan, - StmtNode: execAst, - Ctx: s, - OutputNames: execPlan.OutputNames(), - PsStmt: prepareStmt, - Ti: &executor.TelemetryInfo{}, - ReplicaReadScope: replicaReadScope, + GoCtx: ctx, + InfoSchema: is, + Plan: execPlan, + StmtNode: execAst, + Ctx: s, + OutputNames: execPlan.OutputNames(), + PsStmt: prepareStmt, + Ti: &executor.TelemetryInfo{}, } compileDuration := time.Since(s.sessionVars.StartTime) sessionExecuteCompileDurationGeneral.Observe(compileDuration.Seconds()) @@ -2316,7 +2326,7 @@ func (s *session) cachedPointPlanExec(ctx context.Context, var resultSet sqlexec.RecordSet switch execPlan.(type) { case *plannercore.PointGetPlan: - resultSet, err = stmt.PointGet(ctx, is) + resultSet, err = stmt.PointGet(ctx) s.txn.changeToInvalid() case *plannercore.Update: stmtCtx.Priority = kv.PriorityHigh @@ -2333,9 +2343,9 @@ func (s *session) cachedPointPlanExec(ctx context.Context, // IsCachedExecOk check if we can execute using plan cached in prepared structure // Be careful with the short path, current precondition is ths cached plan satisfying // IsPointGetWithPKOrUniqueKeyByAutoCommit -func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt, isStaleness bool) (bool, error) { +func (s *session) IsCachedExecOk(preparedStmt *plannercore.CachedPrepareStmt) (bool, error) { prepared := preparedStmt.PreparedAst - if prepared.CachedPlan == nil || isStaleness { + if prepared.CachedPlan == nil || staleread.IsStmtStaleness(s) { return false, nil } // check auto commit @@ -2388,51 +2398,49 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ return nil, errors.Errorf("invalid CachedPrepareStmt type") } - var is infoschema.InfoSchema - var snapshotTS uint64 - replicaReadScope := oracle.GlobalTxnScope + execStmt := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: args} + if err := executor.ResetContextOfStmt(s, execStmt); err != nil { + return nil, err + } staleReadProcessor := staleread.NewStaleReadProcessor(s) if err = staleReadProcessor.OnExecutePreparedStmt(preparedStmt.SnapshotTSEvaluator); err != nil { return nil, err } - txnManager := sessiontxn.GetTxnManager(s) if staleReadProcessor.IsStaleness() { - snapshotTS = staleReadProcessor.GetStalenessReadTS() - is = staleReadProcessor.GetStalenessInfoSchema() - replicaReadScope = config.GetTxnScopeFromConfig() - err = txnManager.EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnWithReplaceProvider, - Provider: staleread.NewStalenessTxnContextProvider(s, snapshotTS, is), + s.sessionVars.StmtCtx.IsStaleness = true + err = sessiontxn.GetTxnManager(s).EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithReplaceProvider, + Provider: staleread.NewStalenessTxnContextProvider( + s, + staleReadProcessor.GetStalenessReadTS(), + staleReadProcessor.GetStalenessInfoSchema(), + ), }) if err != nil { return nil, err } - } else { - is = s.GetInfoSchema().(infoschema.InfoSchema) } - staleness := snapshotTS > 0 executor.CountStmtNode(preparedStmt.PreparedAst.Stmt, s.sessionVars.InRestrictedSQL) - ok, err = s.IsCachedExecOk(ctx, preparedStmt, staleness) + cacheExecOk, err := s.IsCachedExecOk(preparedStmt) if err != nil { return nil, err } s.txn.onStmtStart(preparedStmt.SQLDigest.String()) defer s.txn.onStmtEnd() - if err = s.onTxnManagerStmtStartOrRetry(ctx); err != nil { + if 
err = s.onTxnManagerStmtStartOrRetry(ctx, execStmt); err != nil { return nil, err } + s.setRequestSource(ctx, preparedStmt.PreparedAst.StmtType, preparedStmt.PreparedAst.Stmt) + // even the txn is valid, still need to set session variable for coprocessor usage. + s.sessionVars.RequestSourceType = preparedStmt.PreparedAst.StmtType - if p, isOK := txnManager.GetContextProvider().(*legacy.SimpleTxnContextProvider); isOK { - p.InfoSchema = is - } - - if ok { - rs, ok, err := s.cachedPointPlanExec(ctx, txnManager.GetTxnInfoSchema(), stmtID, preparedStmt, replicaReadScope, args) + if cacheExecOk { + rs, ok, err := s.cachedPointPlanExec(ctx, execStmt, preparedStmt) if err != nil { return nil, err } @@ -2440,7 +2448,7 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ return rs, nil } } - return s.preparedStmtExec(ctx, txnManager.GetTxnInfoSchema(), snapshotTS, stmtID, preparedStmt, replicaReadScope, args) + return s.preparedStmtExec(ctx, execStmt, preparedStmt) } func (s *session) DropPreparedStmt(stmtID uint32) error { @@ -2469,85 +2477,8 @@ func (s *session) Txn(active bool) (kv.Transaction, error) { if !active { return &s.txn, nil } - if !s.txn.validOrPending() { - return &s.txn, errors.AddStack(kv.ErrInvalidTxn) - } - if s.txn.pending() { - defer func(begin time.Time) { - s.sessionVars.DurationWaitTS = time.Since(begin) - }(time.Now()) - // Transaction is lazy initialized. - // PrepareTxnCtx is called to get a tso future, makes s.txn a pending txn, - // If Txn() is called later, wait for the future to get a valid txn. - if err := s.txn.changePendingToValid(s.currentCtx); err != nil { - logutil.BgLogger().Error("active transaction fail", - zap.Error(err)) - s.txn.cleanup() - s.sessionVars.TxnCtx.StartTS = 0 - return &s.txn, err - } - s.sessionVars.TxnCtx.StartTS = s.txn.StartTS() - if s.sessionVars.TxnCtx.IsPessimistic { - s.txn.SetOption(kv.Pessimistic, true) - } - if !s.sessionVars.IsAutocommit() && s.sessionVars.SnapshotTS == 0 { - s.sessionVars.SetInTxn(true) - } - s.sessionVars.TxnCtx.CouldRetry = s.isTxnRetryable() - s.txn.SetVars(s.sessionVars.KVVars) - readReplicaType := s.sessionVars.GetReplicaRead() - if readReplicaType.IsFollowerRead() { - s.txn.SetOption(kv.ReplicaRead, readReplicaType) - } - s.txn.SetOption(kv.SnapInterceptor, s.getSnapshotInterceptor()) - if s.GetSessionVars().StmtCtx.WeakConsistency { - s.txn.SetOption(kv.IsolationLevel, kv.RC) - } - setTxnAssertionLevel(&s.txn, s.sessionVars.AssertionLevel) - } - return &s.txn, nil -} - -// isTxnRetryable (if returns true) means the transaction could retry. -// If the transaction is in pessimistic mode, do not retry. -// If the session is already in transaction, enable retry or internal SQL could retry. -// If not, the transaction could always retry, because it should be auto committed transaction. -// Anyway the retry limit is 0, the transaction could not retry. -func (s *session) isTxnRetryable() bool { - sessVars := s.sessionVars - - // The pessimistic transaction no need to retry. - if sessVars.TxnCtx.IsPessimistic { - return false - } - - // If retry limit is 0, the transaction could not retry. - if sessVars.RetryLimit == 0 { - return false - } - - // When `@@tidb_snapshot` is set, it is a ready-only statement and will not cause the errors that should retry a transaction in optimistic mode. - if sessVars.SnapshotTS != 0 { - return false - } - - // If the session is not InTxn, it is an auto-committed transaction. - // The auto-committed transaction could always retry. 
- if !sessVars.InTxn() { - return true - } - - // The internal transaction could always retry. - if sessVars.InRestrictedSQL { - return true - } - - // If the retry is enabled, the transaction could retry. - if !sessVars.DisableTxnAutoRetry { - return true - } - - return false + _, err := sessiontxn.GetTxnManager(s).ActivateTxn() + return &s.txn, err } func (s *session) NewTxn(ctx context.Context) error { @@ -2565,7 +2496,7 @@ func (s *session) NewTxn(ctx context.Context) error { } setTxnAssertionLevel(txn, s.sessionVars.AssertionLevel) s.txn.changeInvalidToValid(txn) - is := temptable.AttachLocalTemporaryTableInfoSchema(s, domain.GetDomain(s).InfoSchema()) + is := s.GetDomainInfoSchema() s.sessionVars.TxnCtx = &variable.TransactionContext{ TxnCtxNoNeedToRestore: variable.TxnCtxNoNeedToRestore{ InfoSchema: is, @@ -2577,6 +2508,12 @@ func (s *session) NewTxn(ctx context.Context) error { }, } s.txn.SetOption(kv.SnapInterceptor, s.getSnapshotInterceptor()) + if s.GetSessionVars().InRestrictedSQL { + s.txn.SetOption(kv.RequestSourceInternal, true) + if source := ctx.Value(kv.RequestSourceKey); source != nil { + s.txn.SetOption(kv.RequestSourceType, source.(kv.RequestSource).RequestSourceType) + } + } return nil } @@ -2601,7 +2538,7 @@ func (s *session) NewStaleTxnWithStartTS(ctx context.Context, startTS uint64) er if err := s.checkBeforeNewTxn(ctx); err != nil { return err } - txnScope := config.GetTxnScopeFromConfig() + txnScope := kv.GlobalTxnScope txn, err := s.store.Begin(tikv.WithTxnScope(txnScope), tikv.WithStartTS(startTS)) if err != nil { return err @@ -2767,6 +2704,11 @@ func (s *session) RefreshVars(ctx context.Context) error { return nil } +// SetSessionStatesHandler implements the Session.SetSessionStatesHandler interface. +func (s *session) SetSessionStatesHandler(stateType sessionstates.SessionStateType, handler sessionctx.SessionStatesHandler) { + s.sessionStatesHandlers[stateType] = handler +} + // CreateSession4Test creates a new session environment for test. func CreateSession4Test(store kv.Storage) (Session, error) { se, err := CreateSession4TestWithOpt(store, nil) @@ -2822,8 +2764,6 @@ func CreateSessionWithOpt(store kv.Storage, opt *Opt) (Session, error) { } privilege.BindPrivilegeManager(s, pm) - sessionBindHandle := bindinfo.NewSessionBindHandle(parser.New()) - s.SetValue(bindinfo.SessionBindInfoKeyType, sessionBindHandle) // Add stats collector, and it will be freed by background stats worker // which periodically updates stats using the collected data. if do.StatsHandle() != nil && do.StatsUpdating() { @@ -2837,8 +2777,8 @@ func CreateSessionWithOpt(store kv.Storage, opt *Opt) (Session, error) { } // loadCollationParameter loads collation parameter from mysql.tidb -func loadCollationParameter(se *session) (bool, error) { - para, err := se.getTableValue(context.TODO(), mysql.TiDBTable, tidbNewCollationEnabled) +func loadCollationParameter(ctx context.Context, se *session) (bool, error) { + para, err := se.getTableValue(ctx, mysql.TiDBTable, tidbNewCollationEnabled) if err != nil { return false, err } @@ -2857,6 +2797,7 @@ var errResultIsEmpty = dbterror.ClassExecutor.NewStd(errno.ErrResultIsEmpty) // BootstrapSession runs the first time when the TiDB server start. 
func BootstrapSession(store kv.Storage) (*domain.Domain, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) cfg := config.GetGlobalConfig() if len(cfg.Instance.PluginLoad) > 0 { err := plugin.Load(context.Background(), plugin.Config{ @@ -2882,14 +2823,14 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { ses[0].GetSessionVars().InRestrictedSQL = true // get system tz from mysql.tidb - tz, err := ses[0].getTableValue(context.TODO(), mysql.TiDBTable, tidbSystemTZ) + tz, err := ses[0].getTableValue(ctx, mysql.TiDBTable, tidbSystemTZ) if err != nil { return nil, err } timeutil.SetSystemTZ(tz) // get the flag from `mysql`.`tidb` which indicating if new collations are enabled. - newCollationEnabled, err := loadCollationParameter(ses[0]) + newCollationEnabled, err := loadCollationParameter(ctx, ses[0]) if err != nil { return nil, err } @@ -2930,7 +2871,7 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { if err != nil { return nil, err } - err = executor.LoadOptRuleBlacklist(ses[5]) + err = executor.LoadOptRuleBlacklist(ctx, ses[5]) if err != nil { return nil, err } @@ -2938,8 +2879,10 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) { if dom.GetEtcdClient() != nil { // We only want telemetry data in production-like clusters. When TiDB is deployed over other engines, // for example, unistore engine (used for local tests), we just skip it. Its etcd client is nil. - dom.TelemetryReportLoop(ses[5]) - dom.TelemetryRotateSubWindowLoop(ses[5]) + go func() { + dom.TelemetryReportLoop(ses[5]) + dom.TelemetryRotateSubWindowLoop(ses[5]) + }() } // A sub context for update table stats, and other contexts for concurrent stats loading. @@ -3013,12 +2956,13 @@ func createSessionWithOpt(store kv.Storage, opt *Opt) (*session, error) { return nil, err } s := &session{ - store: store, - sessionVars: variable.NewSessionVars(), - ddlOwnerManager: dom.DDL().OwnerManager(), - client: store.GetClient(), - mppClient: store.GetMPPClient(), - stmtStats: stmtstats.CreateStatementStats(), + store: store, + sessionVars: variable.NewSessionVars(), + ddlOwnerManager: dom.DDL().OwnerManager(), + client: store.GetClient(), + mppClient: store.GetMPPClient(), + stmtStats: stmtstats.CreateStatementStats(), + sessionStatesHandlers: make(map[sessionstates.SessionStateType]sessionctx.SessionStatesHandler), } s.functionUsageMu.builtinFunctionUsage = make(telemetry.BuiltinFunctionsUsage) if plannercore.PreparedPlanCacheEnabled() { @@ -3041,6 +2985,7 @@ func createSessionWithOpt(store kv.Storage, opt *Opt) (*session, error) { sessionBindHandle := bindinfo.NewSessionBindHandle(parser.New()) s.SetValue(bindinfo.SessionBindInfoKeyType, sessionBindHandle) + s.SetSessionStatesHandler(sessionstates.StateBinding, sessionBindHandle) return s, nil } @@ -3050,11 +2995,12 @@ func createSessionWithOpt(store kv.Storage, opt *Opt) (*session, error) { // a lock context, which cause we can't call createSession directly. 
func CreateSessionWithDomain(store kv.Storage, dom *domain.Domain) (*session, error) { s := &session{ - store: store, - sessionVars: variable.NewSessionVars(), - client: store.GetClient(), - mppClient: store.GetMPPClient(), - stmtStats: stmtstats.CreateStatementStats(), + store: store, + sessionVars: variable.NewSessionVars(), + client: store.GetClient(), + mppClient: store.GetMPPClient(), + stmtStats: stmtstats.CreateStatementStats(), + sessionStatesHandlers: make(map[sessionstates.SessionStateType]sessionctx.SessionStatesHandler), } s.functionUsageMu.builtinFunctionUsage = make(telemetry.BuiltinFunctionsUsage) if plannercore.PreparedPlanCacheEnabled() { @@ -3085,7 +3031,8 @@ func getStoreBootstrapVersion(store kv.Storage) int64 { var ver int64 // check in kv store - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + err := kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { var err error t := meta.NewMeta(txn) ver, err = t.GetBootstrapVersion() @@ -3107,7 +3054,8 @@ func getStoreBootstrapVersion(store kv.Storage) int64 { func finishBootstrap(store kv.Storage) { setStoreBootstrapped(store.UUID()) - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { t := meta.NewMeta(txn) err := t.FinishBootstrap(currentBootstrapVersion) return err @@ -3204,11 +3152,11 @@ func (s *session) PrepareTSFuture(ctx context.Context, future oracle.Future, sco return nil } -func (s *session) GetPreparedTSFuture() oracle.Future { - if future := s.txn.txnFuture; future != nil { - return future.future +func (s *session) GetPreparedTxnFuture() sessionctx.TxnFuture { + if !s.txn.validOrPending() { + return nil } - return nil + return &s.txn } // RefreshTxnCtx implements context.RefreshTxnCtx interface. @@ -3228,13 +3176,6 @@ func (s *session) RefreshTxnCtx(ctx context.Context) error { return sessiontxn.NewTxn(ctx, s) } -// GetSnapshotWithTS returns a snapshot with ts. -func (s *session) GetSnapshotWithTS(ts uint64) kv.Snapshot { - snap := s.GetStore().GetSnapshot(kv.Version{Ver: ts}) - snap.SetOption(kv.SnapInterceptor, s.getSnapshotInterceptor()) - return snap -} - // GetStore gets the store of session. func (s *session) GetStore() kv.Storage { return s.store @@ -3483,6 +3424,10 @@ func (s *session) updateTelemetryMetric(es *executor.ExecStmt) { } else { telemetryCTEUsage.WithLabelValues("notCTE").Inc() } + + if ti.UseMultiSchemaChange { + telemetryMultiSchemaChangeUsage.Inc() + } } // GetBuiltinFunctionUsage returns the replica of counting of builtin function usage @@ -3512,8 +3457,32 @@ func (s *session) GetStmtStats() *stmtstats.StatementStats { } // EncodeSessionStates implements SessionStatesHandler.EncodeSessionStates interface. -func (s *session) EncodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) (err error) { - if err = s.sessionVars.EncodeSessionStates(ctx, sessionStates); err != nil { +func (s *session) EncodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + // Transaction status is hard to encode, so we do not support it. 
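// EncodeSessionStates below first refuses to migrate sessions holding state that
// cannot be serialized (an active transaction, local temporary tables, advisory
// locks, locked tables), then asks each handler registered via
// SetSessionStatesHandler to contribute its part (prepared statements, SQL
// bindings). A minimal sketch of that handler-registry pattern follows; the names
// here are hypothetical and do not reproduce TiDB's sessionctx.SessionStatesHandler
// interface.
package main

import (
	"context"
	"fmt"
)

type StateType int

const (
	StatePrepareStmt StateType = iota
	StateBinding
)

// States is the bag every handler writes into and reads from.
type States struct {
	PreparedStmts map[uint32]string
	Bindings      string
}

type StatesHandler interface {
	Encode(ctx context.Context, st *States) error
	Decode(ctx context.Context, st *States) error
}

type bindingHandler struct{ binding string }

func (h *bindingHandler) Encode(_ context.Context, st *States) error {
	st.Bindings = h.binding
	return nil
}

func (h *bindingHandler) Decode(_ context.Context, st *States) error {
	h.binding = st.Bindings
	return nil
}

type sessionLike struct {
	handlers map[StateType]StatesHandler
}

func (s *sessionLike) SetSessionStatesHandler(t StateType, h StatesHandler) {
	s.handlers[t] = h
}

func (s *sessionLike) EncodeSessionStates(ctx context.Context, st *States) error {
	for _, h := range s.handlers {
		if err := h.Encode(ctx, st); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	s := &sessionLike{handlers: make(map[StateType]StatesHandler)}
	s.SetSessionStatesHandler(StateBinding, &bindingHandler{binding: "use index(idx_a)"})
	var st States
	_ = s.EncodeSessionStates(context.Background(), &st)
	fmt.Println(st.Bindings)
}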
+ s.txn.mu.Lock() + valid := s.txn.Valid() + s.txn.mu.Unlock() + if valid { + return ErrCannotMigrateSession.GenWithStackByArgs("session has an active transaction") + } + // Data in local temporary tables is hard to encode, so we do not support it. + // Check temporary tables here to avoid circle dependency. + if s.sessionVars.LocalTemporaryTables != nil { + localTempTables := s.sessionVars.LocalTemporaryTables.(*infoschema.LocalTemporaryTables) + if localTempTables.Count() > 0 { + return ErrCannotMigrateSession.GenWithStackByArgs("session has local temporary tables") + } + } + // The advisory locks will be released when the session is closed. + if len(s.advisoryLocks) > 0 { + return ErrCannotMigrateSession.GenWithStackByArgs("session has advisory locks") + } + // The TableInfo stores session ID and server ID, so the session cannot be migrated. + if len(s.lockedTables) > 0 { + return ErrCannotMigrateSession.GenWithStackByArgs("session has locked tables") + } + + if err := s.sessionVars.EncodeSessionStates(ctx, sessionStates); err != nil { return err } @@ -3534,25 +3503,65 @@ func (s *session) EncodeSessionStates(ctx context.Context, sctx sessionctx.Conte continue } // Get all session variables because the default values may change between versions. - if val, keep, err := variable.GetSessionStatesSystemVar(s.sessionVars, sv.Name); err == nil && keep { + if val, keep, err := variable.GetSessionStatesSystemVar(s.sessionVars, sv.Name); err != nil { + return err + } else if keep { sessionStates.SystemVars[sv.Name] = val } } - return + + // Encode prepared statements and sql bindings. + for _, handler := range s.sessionStatesHandlers { + if err := handler.EncodeSessionStates(ctx, s, sessionStates); err != nil { + return err + } + } + return nil } // DecodeSessionStates implements SessionStatesHandler.DecodeSessionStates interface. -func (s *session) DecodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) (err error) { +func (s *session) DecodeSessionStates(ctx context.Context, sctx sessionctx.Context, sessionStates *sessionstates.SessionStates) error { + // Decode prepared statements and sql bindings. + for _, handler := range s.sessionStatesHandlers { + if err := handler.DecodeSessionStates(ctx, s, sessionStates); err != nil { + return err + } + } + // Decode session variables. for name, val := range sessionStates.SystemVars { - if err = variable.SetSessionSystemVar(s.sessionVars, name, val); err != nil { + if err := variable.SetSessionSystemVar(s.sessionVars, name, val); err != nil { return err } } - // Decode stmt ctx after session vars because setting session vars may override stmt ctx, such as warnings. - if err = s.sessionVars.DecodeSessionStates(ctx, sessionStates); err != nil { - return err + // Decoding session vars / prepared statements may override stmt ctx, such as warnings, + // so we decode stmt ctx at last. + return s.sessionVars.DecodeSessionStates(ctx, sessionStates) +} + +func (s *session) setRequestSource(ctx context.Context, stmtLabel string, stmtNode ast.StmtNode) { + if !s.isInternal() { + if txn, _ := s.Txn(false); txn != nil && txn.Valid() { + txn.SetOption(kv.RequestSourceType, stmtLabel) + } else { + s.sessionVars.RequestSourceType = stmtLabel + } + } else { + if source := ctx.Value(kv.RequestSourceKey); source != nil { + s.sessionVars.RequestSourceType = source.(kv.RequestSource).RequestSourceType + } else { + // panic in test mode in case there are requests without source in the future. 
+ // log warnings in production mode. + if flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil { + panic("unexpected no source type context, if you see this error, " + + "the `RequestSourceTypeKey` is missing in your context") + } else { + logutil.Logger(ctx).Warn("unexpected no source type context, if you see this warning, "+ + "the `RequestSourceTypeKey` is missing in the context", + zap.Bool("internal", s.isInternal()), + zap.String("sql", stmtNode.Text())) + } + } } - return err } diff --git a/session/session_test/session_test.go b/session/session_test/session_test.go index 2a0f95fcbc473..a6227be338959 100644 --- a/session/session_test/session_test.go +++ b/session/session_test/session_test.go @@ -246,16 +246,15 @@ func TestDisableTxnAutoRetry(t *testing.T) { tk1.MustExec("update no_retry set id = 5") // RestrictedSQL should retry. - tk1.Session().GetSessionVars().InRestrictedSQL = true - tk1.MustExec("begin") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + tk1.Session().ExecuteInternal(ctx, "begin") tk2.MustExec("update no_retry set id = 6") - tk1.MustExec("update no_retry set id = 7") - tk1.MustExec("commit") + tk1.Session().ExecuteInternal(ctx, "update no_retry set id = 7") + tk1.Session().ExecuteInternal(ctx, "commit") // test for disable transaction local latch - tk1.Session().GetSessionVars().InRestrictedSQL = false defer config.RestoreFunc()() config.UpdateGlobal(func(conf *config.Config) { conf.TxnLocalLatches.Enabled = false diff --git a/session/tidb.go b/session/tidb.go index 12ee40da2d4be..d0530488b5b3f 100644 --- a/session/tidb.go +++ b/session/tidb.go @@ -378,5 +378,6 @@ func ResultSetToStringSlice(ctx context.Context, s Session, rs sqlexec.RecordSet // Session errors. var ( - ErrForUpdateCantRetry = dbterror.ClassSession.NewStd(errno.ErrForUpdateCantRetry) + ErrForUpdateCantRetry = dbterror.ClassSession.NewStd(errno.ErrForUpdateCantRetry) + ErrCannotMigrateSession = dbterror.ClassSession.NewStd(errno.ErrCannotMigrateSession) ) diff --git a/session/tidb_test.go b/session/tidb_test.go index 70831a8f64d89..2023921445026 100644 --- a/session/tidb_test.go +++ b/session/tidb_test.go @@ -47,7 +47,8 @@ func TestSysSessionPoolGoroutineLeak(t *testing.T) { for i := 0; i < count; i++ { s := stmts[i] wg.Run(func() { - _, _, err := se.ExecRestrictedStmt(context.Background(), s) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + _, _, err := se.ExecRestrictedStmt(ctx, s) require.NoError(t, err) }) } diff --git a/session/txn.go b/session/txn.go index 5cb87948bd6ce..94227d2cf8209 100644 --- a/session/txn.go +++ b/session/txn.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/session/txninfo" @@ -82,8 +83,26 @@ func (txn *LazyTxn) CacheTableInfo(id int64, info *model.TableInfo) { func (txn *LazyTxn) init() { txn.mutations = make(map[int64]*binlog.TableMutation) txn.mu.Lock() - txn.mu.TxnInfo.State = txninfo.TxnIdle - txn.mu.Unlock() + defer txn.mu.Unlock() + txn.mu.TxnInfo = txninfo.TxnInfo{} +} + +// call this under lock! 
+func (txn *LazyTxn) updateState(state txninfo.TxnRunningState) { + if txn.mu.TxnInfo.State != state { + lastState := txn.mu.TxnInfo.State + lastStateChangeTime := txn.mu.TxnInfo.LastStateChangeTime + txn.mu.TxnInfo.State = state + txn.mu.TxnInfo.LastStateChangeTime = time.Now() + if !lastStateChangeTime.IsZero() { + hasLockLbl := "false" + if !txn.mu.TxnInfo.BlockStartTime.IsZero() { + hasLockLbl = "true" + } + metrics.TxnDurationHistogram.WithLabelValues(txninfo.StateLabel(lastState), hasLockLbl).Observe(time.Since(lastStateChangeTime).Seconds()) + } + metrics.TxnStatusEnteringCounter.WithLabelValues(txninfo.StateLabel(state)).Inc() + } } func (txn *LazyTxn) initStmtBuf() { @@ -136,12 +155,22 @@ func (txn *LazyTxn) resetTxnInfo( currentSQLDigest string, allSQLDigests []string, ) { + if !txn.mu.LastStateChangeTime.IsZero() { + lastState := txn.mu.State + hasLockLbl := "false" + if !txn.mu.TxnInfo.BlockStartTime.IsZero() { + hasLockLbl = "true" + } + metrics.TxnDurationHistogram.WithLabelValues(txninfo.StateLabel(lastState), hasLockLbl).Observe(time.Since(txn.mu.TxnInfo.LastStateChangeTime).Seconds()) + } if txn.mu.TxnInfo.StartTS != 0 { txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo) } txn.mu.TxnInfo = txninfo.TxnInfo{} txn.mu.TxnInfo.StartTS = startTS txn.mu.TxnInfo.State = state + metrics.TxnStatusEnteringCounter.WithLabelValues(txninfo.StateLabel(state)).Inc() + txn.mu.TxnInfo.LastStateChangeTime = time.Now() txn.mu.TxnInfo.EntriesCount = entriesCount txn.mu.TxnInfo.EntriesSize = entriesSize txn.mu.TxnInfo.CurrentSQLDigest = currentSQLDigest @@ -272,11 +301,21 @@ func (txn *LazyTxn) changeToInvalid() { txn.txnFuture = nil txn.mu.Lock() - defer txn.mu.Unlock() + lastState := txn.mu.TxnInfo.State + lastStateChangeTime := txn.mu.TxnInfo.LastStateChangeTime + hasLock := !txn.mu.TxnInfo.BlockStartTime.IsZero() if txn.mu.TxnInfo.StartTS != 0 { txninfo.Recorder.OnTrxEnd(&txn.mu.TxnInfo) } txn.mu.TxnInfo = txninfo.TxnInfo{} + txn.mu.Unlock() + if !lastStateChangeTime.IsZero() { + hasLockLbl := "false" + if hasLock { + hasLockLbl = "true" + } + metrics.TxnDurationHistogram.WithLabelValues(txninfo.StateLabel(lastState), hasLockLbl).Observe(time.Since(lastStateChangeTime).Seconds()) + } } func (txn *LazyTxn) onStmtStart(currentSQLDigest string) { @@ -286,7 +325,7 @@ func (txn *LazyTxn) onStmtStart(currentSQLDigest string) { txn.mu.Lock() defer txn.mu.Unlock() - txn.mu.TxnInfo.State = txninfo.TxnRunning + txn.updateState(txninfo.TxnRunning) txn.mu.TxnInfo.CurrentSQLDigest = currentSQLDigest // Keeps at most 50 history sqls to avoid consuming too much memory. 
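// updateState above is the single choke point for transaction state changes: on
// every transition it observes the time spent in the previous state in a duration
// histogram labelled by state and by whether the transaction was blocked on a lock,
// and bumps a counter for the state being entered. A compact, self-contained sketch
// of the same bookkeeping with prometheus client_golang follows; metric names and
// labels here are illustrative, not TiDB's real metrics.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	txnDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "txn_state_seconds",
		Help:    "Time spent in each transaction state.",
		Buckets: prometheus.DefBuckets,
	}, []string{"state", "has_lock"})

	txnEntering = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "txn_state_entering_total",
		Help: "Number of times a transaction entered each state.",
	}, []string{"state"})
)

type stateTracker struct {
	state       string
	changedAt   time.Time
	holdingLock bool
}

// update mirrors LazyTxn.updateState: record how long we stayed in the old state,
// then switch to the new one.
func (t *stateTracker) update(state string) {
	if t.state == state {
		return
	}
	if !t.changedAt.IsZero() {
		hasLock := "false"
		if t.holdingLock {
			hasLock = "true"
		}
		txnDuration.WithLabelValues(t.state, hasLock).Observe(time.Since(t.changedAt).Seconds())
	}
	t.state = state
	t.changedAt = time.Now()
	txnEntering.WithLabelValues(state).Inc()
}

func main() {
	prometheus.MustRegister(txnDuration, txnEntering)
	t := &stateTracker{}
	t.update("executing_sql")
	time.Sleep(10 * time.Millisecond)
	t.update("committing")
	fmt.Println("final state:", t.state)
}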
const maxTransactionStmtHistory int = 50 @@ -297,9 +336,9 @@ func (txn *LazyTxn) onStmtStart(currentSQLDigest string) { func (txn *LazyTxn) onStmtEnd() { txn.mu.Lock() + defer txn.mu.Unlock() txn.mu.TxnInfo.CurrentSQLDigest = "" - txn.mu.TxnInfo.State = txninfo.TxnIdle - txn.mu.Unlock() + txn.updateState(txninfo.TxnIdle) } var hasMockAutoIncIDRetry = int64(0) @@ -340,7 +379,7 @@ func (txn *LazyTxn) Commit(ctx context.Context) error { } txn.mu.Lock() - txn.mu.TxnInfo.State = txninfo.TxnCommitting + txn.updateState(txninfo.TxnCommitting) txn.mu.Unlock() failpoint.Inject("mockSlowCommit", func(_ failpoint.Value) {}) @@ -374,7 +413,7 @@ func (txn *LazyTxn) Commit(ctx context.Context) error { func (txn *LazyTxn) Rollback() error { defer txn.reset() txn.mu.Lock() - txn.mu.TxnInfo.State = txninfo.TxnRollingBack + txn.updateState(txninfo.TxnRollingBack) txn.mu.Unlock() // mockSlowRollback is used to mock a rollback which takes a long time failpoint.Inject("mockSlowRollback", func(_ failpoint.Value) {}) @@ -396,7 +435,7 @@ func (txn *LazyTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keys ...k var originState txninfo.TxnRunningState txn.mu.Lock() originState = txn.mu.TxnInfo.State - txn.mu.TxnInfo.State = txninfo.TxnLockWaiting + txn.updateState(txninfo.TxnLockAcquiring) txn.mu.TxnInfo.BlockStartTime.Valid = true txn.mu.TxnInfo.BlockStartTime.Time = t txn.mu.Unlock() @@ -405,7 +444,7 @@ func (txn *LazyTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keys ...k txn.mu.Lock() defer txn.mu.Unlock() - txn.mu.TxnInfo.State = originState + txn.updateState(originState) txn.mu.TxnInfo.BlockStartTime.Valid = false txn.mu.TxnInfo.EntriesCount = uint64(txn.Transaction.Len()) txn.mu.TxnInfo.EntriesSize = uint64(txn.Transaction.Size()) @@ -441,6 +480,30 @@ func (txn *LazyTxn) KeysNeedToLock() ([]kv.Key, error) { return keys, nil } +// Wait converts pending txn to valid +func (txn *LazyTxn) Wait(ctx context.Context, sctx sessionctx.Context) (kv.Transaction, error) { + if !txn.validOrPending() { + return txn, errors.AddStack(kv.ErrInvalidTxn) + } + if txn.pending() { + defer func(begin time.Time) { + sctx.GetSessionVars().DurationWaitTS = time.Since(begin) + }(time.Now()) + + // Transaction is lazy initialized. + // PrepareTxnCtx is called to get a tso future, makes s.txn a pending txn, + // If Txn() is called later, wait for the future to get a valid txn. + if err := txn.changePendingToValid(ctx); err != nil { + logutil.BgLogger().Error("active transaction fail", + zap.Error(err)) + txn.cleanup() + sctx.GetSessionVars().TxnCtx.StartTS = 0 + return txn, err + } + } + return txn, nil +} + func keyNeedToLock(k, v []byte, flags kv.KeyFlags) bool { isTableKey := bytes.HasPrefix(k, tablecodec.TablePrefix()) if !isTableKey { diff --git a/session/txninfo/txn_info.go b/session/txninfo/txn_info.go index 60d2133f01d0c..f62564e052a92 100644 --- a/session/txninfo/txn_info.go +++ b/session/txninfo/txn_info.go @@ -33,14 +33,28 @@ const ( TxnIdle TxnRunningState = iota // TxnRunning means the transaction is running, i.e. 
executing a statement TxnRunning - // TxnLockWaiting means the transaction is blocked on a lock - TxnLockWaiting - // TxnCommitting means the transaction is (at least trying to) committing + // TxnLockAcquiring means the transaction is trying to acquire a lock + TxnLockAcquiring + // TxnCommitting means`` the transaction is (at least trying to) committing TxnCommitting // TxnRollingBack means the transaction is rolling back TxnRollingBack ) +// StateLabel is used to translate TxnRunningState to its prometheus label name. +var stateLabel map[TxnRunningState]string = map[TxnRunningState]string{ + TxnIdle: "idle", + TxnRunning: "executing_sql", + TxnLockAcquiring: "acquiring_lock", + TxnCommitting: "committing", + TxnRollingBack: "rolling_back", +} + +// StateLabel is used to translate TxnRunningState to its prometheus label name. +func StateLabel(state TxnRunningState) string { + return stateLabel[state] +} + const ( // IDStr is the column name of the TIDB_TRX table's ID column. IDStr = "ID" @@ -90,7 +104,9 @@ type TxnInfo struct { // Current execution state of the transaction. State TxnRunningState - // Last trying to block start time. Invalid if State is not TxnLockWaiting. + // When last time `State` changes, for metrics + LastStateChangeTime time.Time + // Last trying to block start time. Invalid if State is not TxnLockAcquiring. BlockStartTime struct { Valid bool time.Time diff --git a/session/txnmanager.go b/session/txnmanager.go index 19d28ae014f70..9e5e9848b171c 100644 --- a/session/txnmanager.go +++ b/session/txnmanager.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" @@ -46,6 +47,7 @@ func getTxnManager(sctx sessionctx.Context) sessiontxn.TxnManager { type txnManager struct { sctx sessionctx.Context + stmtNode ast.StmtNode ctxProvider sessiontxn.TxnContextProvider } @@ -92,6 +94,36 @@ func (m *txnManager) GetStmtForUpdateTS() (uint64, error) { return ts, nil } +func (m *txnManager) GetTxnScope() string { + if m.ctxProvider == nil { + return kv.GlobalTxnScope + } + return m.ctxProvider.GetTxnScope() +} + +func (m *txnManager) GetReadReplicaScope() string { + if m.ctxProvider == nil { + return kv.GlobalReplicaScope + } + return m.ctxProvider.GetReadReplicaScope() +} + +// GetSnapshotWithStmtReadTS gets snapshot with read ts +func (m *txnManager) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { + if m.ctxProvider == nil { + return nil, errors.New("context provider not set") + } + return m.ctxProvider.GetSnapshotWithStmtReadTS() +} + +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts +func (m *txnManager) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { + if m.ctxProvider == nil { + return nil, errors.New("context provider not set") + } + return m.ctxProvider.GetSnapshotWithStmtForUpdateTS() +} + func (m *txnManager) GetContextProvider() sessiontxn.TxnContextProvider { return m.ctxProvider } @@ -116,14 +148,21 @@ func (m *txnManager) EnterNewTxn(ctx context.Context, r *sessiontxn.EnterNewTxnR func (m *txnManager) OnTxnEnd() { m.ctxProvider = nil + m.stmtNode = nil +} + +func (m *txnManager) GetCurrentStmt() ast.StmtNode { + return m.stmtNode } // OnStmtStart is the hook that should be called when a new statement started -func (m *txnManager) OnStmtStart(ctx context.Context) error { +func (m *txnManager) OnStmtStart(ctx context.Context, node 
ast.StmtNode) error { + m.stmtNode = node + if m.ctxProvider == nil { return errors.New("context provider not set") } - return m.ctxProvider.OnStmtStart(ctx) + return m.ctxProvider.OnStmtStart(ctx, m.stmtNode) } // OnStmtErrorForNextAction is the hook that should be called when a new statement get an error @@ -134,6 +173,14 @@ func (m *txnManager) OnStmtErrorForNextAction(point sessiontxn.StmtErrorHandlePo return m.ctxProvider.OnStmtErrorForNextAction(point, err) } +// ActivateTxn decides to activate txn according to the parameter `active` +func (m *txnManager) ActivateTxn() (kv.Transaction, error) { + if m.ctxProvider == nil { + return nil, errors.AddStack(kv.ErrInvalidTxn) + } + return m.ctxProvider.ActivateTxn() +} + // OnStmtRetry is the hook that should be called when a statement retry func (m *txnManager) OnStmtRetry(ctx context.Context) error { if m.ctxProvider == nil { diff --git a/sessionctx/BUILD.bazel b/sessionctx/BUILD.bazel index 3b542b3e8b0bd..3d966088021ba 100644 --- a/sessionctx/BUILD.bazel +++ b/sessionctx/BUILD.bazel @@ -24,6 +24,7 @@ go_library( go_test( name = "sessionctx_test", + timeout = "short", srcs = [ "context_test.go", "main_test.go", diff --git a/sessionctx/binloginfo/BUILD.bazel b/sessionctx/binloginfo/BUILD.bazel index d5964ad8cd4af..ff9c03dbfa3e2 100644 --- a/sessionctx/binloginfo/BUILD.bazel +++ b/sessionctx/binloginfo/BUILD.bazel @@ -25,6 +25,7 @@ go_library( go_test( name = "binloginfo_test", + timeout = "short", srcs = [ "binloginfo_test.go", "main_test.go", diff --git a/sessionctx/context.go b/sessionctx/context.go index 5c4128412e0a0..21b89ae72a351 100644 --- a/sessionctx/context.go +++ b/sessionctx/context.go @@ -109,9 +109,6 @@ type Context interface { // only used to daemon session like `statsHandle` to detect global variable change. RefreshVars(context.Context) error - // GetSnapshotWithTS returns a snapshot with start ts - GetSnapshotWithTS(ts uint64) kv.Snapshot - // GetStore returns the store of session. GetStore() kv.Storage @@ -152,8 +149,8 @@ type Context interface { HasLockedTables() bool // PrepareTSFuture uses to prepare timestamp by future. PrepareTSFuture(ctx context.Context, future oracle.Future, scope string) error - // GetPreparedTSFuture returns the prepared ts future - GetPreparedTSFuture() oracle.Future + // GetPreparedTxnFuture returns the prepared ts future + GetPreparedTxnFuture() TxnFuture // StoreIndexUsage stores the index usage information. StoreIndexUsage(tblID int64, idxID int64, rowsSelected int64) // GetTxnWriteThroughputSLI returns the TxnWriteThroughputSLI. @@ -176,6 +173,13 @@ type Context interface { ReleaseAllAdvisoryLocks() int } +// TxnFuture is an interface where implementations have a kv.Transaction field and after +// calling Wait of the TxnFuture, the kv.Transaction will become valid. 
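// The TxnFuture interface declared below pairs with LazyTxn.Wait earlier in this
// diff: a statement can hold on to a pending, lazily initialized transaction and
// only block on the TSO future when it actually needs a valid kv.Transaction.
// A minimal sketch of that "pending until Wait" shape, with hypothetical types in
// place of LazyTxn and the oracle future, is below.
package main

import (
	"context"
	"errors"
	"fmt"
)

// tsFuture stands in for the oracle future that eventually yields a start timestamp.
type tsFuture func(ctx context.Context) (uint64, error)

type lazyTxn struct {
	startTS uint64
	future  tsFuture // non-nil while the txn is still pending
}

func (t *lazyTxn) valid() bool   { return t.startTS != 0 }
func (t *lazyTxn) pending() bool { return t.future != nil }

// Wait mirrors LazyTxn.Wait: a pending txn is converted to a valid one by resolving
// the timestamp future; a txn that is neither valid nor pending is an error.
func (t *lazyTxn) Wait(ctx context.Context) (*lazyTxn, error) {
	if !t.valid() && !t.pending() {
		return t, errors.New("invalid txn")
	}
	if t.pending() {
		ts, err := t.future(ctx)
		if err != nil {
			return t, err
		}
		t.startTS = ts
		t.future = nil
	}
	return t, nil
}

func main() {
	txn := &lazyTxn{future: func(context.Context) (uint64, error) { return 424242, nil }}
	fmt.Println("valid before Wait:", txn.valid())
	if _, err := txn.Wait(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("valid after Wait:", txn.valid(), "startTS:", txn.startTS)
}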
+type TxnFuture interface { + // Wait converts pending txn to valid + Wait(ctx context.Context, sctx Context) (kv.Transaction, error) +} + type basicCtxType int func (t basicCtxType) String() string { diff --git a/sessionctx/sessionstates/BUILD.bazel b/sessionctx/sessionstates/BUILD.bazel index ba5cb9254f9f8..1cd0a6c172cc2 100644 --- a/sessionctx/sessionstates/BUILD.bazel +++ b/sessionctx/sessionstates/BUILD.bazel @@ -7,19 +7,26 @@ go_library( visibility = ["//visibility:public"], deps = [ "//parser/types", + "//sessionctx/stmtctx", "//types", ], ) go_test( name = "sessionstates_test", + timeout = "short", srcs = ["session_states_test.go"], deps = [ + "//config", "//errno", "//parser/mysql", + "//parser/terror", + "//server", "//sessionctx/variable", "//testkit", + "//types", "//util/sem", + "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", ], ) diff --git a/sessionctx/sessionstates/session_states.go b/sessionctx/sessionstates/session_states.go index 10a2756dd04f4..a9636e2f90014 100644 --- a/sessionctx/sessionstates/session_states.go +++ b/sessionctx/sessionstates/session_states.go @@ -22,6 +22,25 @@ import ( "github.com/pingcap/tidb/types" ) +// SessionStateType is the type of session states. +type SessionStateType int + +// These enums represents the types of session state handlers. +const ( + // StatePrepareStmt represents prepared statements. + StatePrepareStmt SessionStateType = iota + // StateBinding represents session SQL bindings. + StateBinding +) + +// PreparedStmtInfo contains the information about prepared statements, both text and binary protocols. +type PreparedStmtInfo struct { + Name string `json:"name,omitempty"` + StmtText string `json:"text"` + StmtDB string `json:"db,omitempty"` + ParamTypes []byte `json:"types,omitempty"` +} + // QueryInfo represents the information of last executed query. It's used to expose information for test purpose. type QueryInfo struct { TxnScope string `json:"txn_scope"` @@ -42,6 +61,7 @@ type SessionStates struct { UserVars map[string]*types.Datum `json:"user-var-values,omitempty"` UserVarTypes map[string]*ptypes.FieldType `json:"user-var-types,omitempty"` SystemVars map[string]string `json:"sys-vars,omitempty"` + PreparedStmts map[uint32]*PreparedStmtInfo `json:"prepared-stmts,omitempty"` PreparedStmtID uint32 `json:"prepared-stmt-id,omitempty"` Status uint16 `json:"status,omitempty"` CurrentDB string `json:"current-db,omitempty"` @@ -56,4 +76,6 @@ type SessionStates struct { LastAffectedRows int64 `json:"affected-rows,omitempty"` LastInsertID uint64 `json:"last-insert-id,omitempty"` Warnings []stmtctx.SQLWarn `json:"warnings,omitempty"` + // Define it as string to avoid cycle import. 
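// Session states travel between TiDB instances as JSON, so each new field here
// (PreparedStmts, and the Bindings string that follows) is just another tagged
// member with omitempty; Bindings stays a plain string to avoid an import cycle
// with bindinfo. A tiny round-trip of a struct shaped like PreparedStmtInfo
// illustrates the encoding; the struct below is a local copy for demonstration,
// not an import of the sessionstates package.
package main

import (
	"encoding/json"
	"fmt"
)

type preparedStmtInfo struct {
	Name       string `json:"name,omitempty"`
	StmtText   string `json:"text"`
	StmtDB     string `json:"db,omitempty"`
	ParamTypes []byte `json:"types,omitempty"`
}

func main() {
	stmts := map[uint32]*preparedStmtInfo{
		1: {Name: "stmt1", StmtText: "select * from t1 where id=?", StmtDB: "test"},
	}
	raw, err := json.Marshal(stmts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	var decoded map[uint32]*preparedStmtInfo
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded[1].StmtText)
}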
+ Bindings string `json:"bindings,omitempty"` } diff --git a/sessionctx/sessionstates/session_states_test.go b/sessionctx/sessionstates/session_states_test.go index 29101af06f392..b4b886a893af7 100644 --- a/sessionctx/sessionstates/session_states_test.go +++ b/sessionctx/sessionstates/session_states_test.go @@ -15,16 +15,23 @@ package sessionstates_test import ( + "context" + "encoding/binary" "fmt" "strconv" "strings" "testing" "time" + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/sem" "github.com/stretchr/testify/require" ) @@ -269,6 +276,18 @@ func TestSessionCtx(t *testing.T) { tk.MustQuery("select database()").Check(testkit.Rows("test")) }, }, + { + // check CurrentDB + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create database test1") + tk.MustExec("use test1") + tk.MustExec("drop database test1") + return nil + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("select database()").Check(testkit.Rows("")) + }, + }, { // check LastTxnInfo checkFunc: func(tk *testkit.TestKit, param any) { @@ -554,6 +573,822 @@ func TestStatementCtx(t *testing.T) { } } +func TestPreparedStatements(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + sv := server.CreateMockServer(t, store) + defer sv.Close() + + tests := []struct { + setFunc func(tk *testkit.TestKit, conn server.MockConn) any + checkFunc func(tk *testkit.TestKit, conn server.MockConn, param any) + restoreErr int + cleanFunc func(tk *testkit.TestKit) + }{ + { + // no such statement + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustGetErrCode("execute stmt", errno.ErrPreparedStmtNotFound) + }, + }, + { + // deallocate it after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("prepare stmt from 'select 1'") + tk.MustExec("deallocate prepare stmt") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustGetErrCode("execute stmt", errno.ErrPreparedStmtNotFound) + }, + }, + { + // statement with no parameters + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert into test.t1 value(1), (2), (3)") + tk.MustExec("prepare stmt from 'select * from test.t1 order by id'") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("1", "2", "3")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // statement with user-defined parameters + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert into test.t1 value(1), (2), (3)") + tk.MustExec("prepare stmt from 'select * from test.t1 where id>? 
order by id limit ?'") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustExec("set @a=1, @b=1") + tk.MustQuery("execute stmt using @a, @b").Check(testkit.Rows("2")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // execute the statement multiple times + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("prepare stmt1 from 'insert into test.t1 value(?), (?), (?)'") + tk.MustExec("prepare stmt2 from 'select * from test.t1 order by id'") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt2").Check(testkit.Rows()) + tk.MustExec("set @a=1, @b=2, @c=3") + tk.MustExec("execute stmt1 using @a, @b, @c") + tk.MustQuery("execute stmt2").Check(testkit.Rows("1", "2", "3")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // update session variables after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("set names utf8mb4 collate utf8mb4_general_ci") + tk.MustExec("prepare stmt from 'select @@character_set_client, @@collation_connection'") + tk.MustQuery("execute stmt").Check(testkit.Rows("utf8mb4 utf8mb4_general_ci")) + tk.MustExec("set names gbk collate gbk_chinese_ci") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("gbk gbk_chinese_ci")) + }, + }, + { + // session-scoped ANSI_QUOTES + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("set sql_mode='ANSI_QUOTES'") + tk.MustExec("prepare stmt from 'select \\'a\\''") + tk.MustQuery("execute stmt").Check(testkit.Rows("a")) + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("a")) + }, + }, + { + // global-scoped ANSI_QUOTES + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("set global sql_mode='ANSI_QUOTES'") + tk.MustExec("prepare stmt from \"select \\\"a\\\"\"") + tk.MustQuery("execute stmt").Check(testkit.Rows("a")) + return nil + }, + restoreErr: errno.ErrBadField, + }, + { + // statement name + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("prepare `stmt 1` from 'select 1'") + tk.MustQuery("execute `stmt 1`").Check(testkit.Rows("1")) + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute `stmt 1`").Check(testkit.Rows("1")) + }, + }, + { + // multiple prepared statements + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert into test.t1 value(1), (2), (3)") + tk.MustExec("prepare stmt1 from 'select * from test.t1 order by id'") + tk.MustExec("prepare stmt2 from 'select * from test.t1 where id=?'") + tk.MustExec("set @a=1") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt1").Check(testkit.Rows("1", "2", "3")) + tk.MustQuery("execute stmt2 using @a").Check(testkit.Rows("1")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // change current db after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("use test") + tk.MustExec("create table t1(id int)") + 
tk.MustExec("insert into t1 value(1), (2), (3)") + tk.MustExec("prepare stmt from 'select * from t1 order by id'") + tk.MustExec("use mysql") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("select database()").Check(testkit.Rows("mysql")) + tk.MustQuery("execute stmt").Check(testkit.Rows("1", "2", "3")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // update user variable after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert into test.t1 value(1), (2), (3)") + tk.MustExec("set @a=1") + tk.MustExec("prepare stmt from 'select * from test.t1 where id=?'") + tk.MustQuery("execute stmt using @a").Check(testkit.Rows("1")) + tk.MustExec("set @a=2") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt using @a").Check(testkit.Rows("2")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // alter table after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert into test.t1 value(1)") + tk.MustExec("prepare stmt from 'select * from test.t1'") + tk.MustExec("alter table test.t1 add column c char(1) default 'a'") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("1 a")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // drop and create table after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("prepare stmt from 'select * from test.t1'") + tk.MustExec("drop table test.t1") + tk.MustExec("create table test.t1(id int, c char(1))") + tk.MustExec("insert into test.t1 value(1, 'a')") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("1 a")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // drop table after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("prepare stmt from 'select * from test.t1'") + tk.MustExec("drop table test.t1") + return nil + }, + restoreErr: errno.ErrNoSuchTable, + }, + { + // drop db after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create database test1") + tk.MustExec("use test1") + tk.MustExec("create table t1(id int)") + tk.MustExec("prepare stmt from 'select * from t1'") + tk.MustExec("drop database test1") + return nil + }, + restoreErr: errno.ErrNoSuchTable, + }, + { + // update sql_mode after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("set sql_mode=''") + tk.MustExec("create table test.t1(id int, name char(10))") + tk.MustExec("insert into test.t1 value(1, 'a')") + tk.MustExec("prepare stmt from 'select id, name from test.t1 group by id'") + tk.MustExec("set sql_mode='ONLY_FULL_GROUP_BY'") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + // The prepare statement is decoded after decoding session variables, + // so `SET SESSION_STATES` won't report errors. 
+ tk.MustGetErrCode("execute stmt", errno.ErrFieldNotInGroupBy) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // update global sql_mode after prepare + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("set sql_mode=''") + tk.MustExec("create table test.t1(id int, name char(10))") + tk.MustExec("prepare stmt from 'select id, name from test.t1 group by id'") + tk.MustExec("set global sql_mode='ONLY_FULL_GROUP_BY'") + return nil + }, + restoreErr: errno.ErrFieldNotInGroupBy, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + tk.MustExec("set global sql_mode=default") + }, + }, + { + // warnings won't be affected + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + // Decoding this prepared statement should report a warning. + tk.MustExec("prepare stmt from 'select 0/0'") + // Override the warning. + tk.MustQuery("select 1") + return nil + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("show warnings").Check(testkit.Rows()) + }, + }, + { + // test binary-protocol prepared statement + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + stmtID, _, _, err := tk.Session().PrepareStmt("select ?") + require.NoError(t, err) + return stmtID + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + datum := []types.Datum{types.NewDatum(1)} + rs, err := tk.Session().ExecutePreparedStmt(context.Background(), param.(uint32), datum) + require.NoError(t, err) + tk.ResultSetToResult(rs, "").Check(testkit.Rows("1")) + }, + }, + { + // no such prepared statement + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + _, err := tk.Session().ExecutePreparedStmt(context.Background(), 1, nil) + errEqualsCode(t, err, errno.ErrPreparedStmtNotFound) + }, + }, + { + // both text and binary protocols + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("prepare stmt from 'select 10'") + stmtID, _, _, err := tk.Session().PrepareStmt("select ?") + require.NoError(t, err) + return stmtID + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + tk.MustQuery("execute stmt").Check(testkit.Rows("10")) + datum := []types.Datum{types.NewDatum(1)} + rs, err := tk.Session().ExecutePreparedStmt(context.Background(), param.(uint32), datum) + require.NoError(t, err) + tk.ResultSetToResult(rs, "").Check(testkit.Rows("1")) + }, + }, + { + // drop binary protocol statements + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + stmtID, _, _, err := tk.Session().PrepareStmt("select ?") + require.NoError(t, err) + return stmtID + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + err := tk.Session().DropPreparedStmt(param.(uint32)) + require.NoError(t, err) + }, + }, + { + // execute the statement multiple times + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + tk.MustExec("create table test.t1(id int)") + stmtID1, _, _, err := tk.Session().PrepareStmt("insert into test.t1 value(?), (?), (?)") + require.NoError(t, err) + stmtID2, _, _, err := tk.Session().PrepareStmt("select * from test.t1 order by id") + require.NoError(t, err) + return []uint32{stmtID1, stmtID2} + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + stmtIDs := param.([]uint32) + rs, err := tk.Session().ExecutePreparedStmt(context.Background(), stmtIDs[1], nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, 
"").Check(testkit.Rows()) + datum := []types.Datum{types.NewDatum(1), types.NewDatum(2), types.NewDatum(3)} + _, err = tk.Session().ExecutePreparedStmt(context.Background(), stmtIDs[0], datum) + require.NoError(t, err) + rs, err = tk.Session().ExecutePreparedStmt(context.Background(), stmtIDs[1], nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, "").Check(testkit.Rows("1", "2", "3")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // the latter stmt ID should be bigger + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + stmtID, _, _, err := tk.Session().PrepareStmt("select ?") + require.NoError(t, err) + return stmtID + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + stmtID, _, _, err := tk.Session().PrepareStmt("select ?") + require.NoError(t, err) + require.True(t, stmtID > param.(uint32)) + }, + }, + { + // execute the statement with cursor + setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select ?")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, true, true, paramInfo{value: 1, isNull: false}) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getFetchBytes(1, 10) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + // This COM_STMT_FETCH returns EOF. + cmd = getFetchBytes(1, 10) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + return uint32(1) + }, + checkFunc: func(tk *testkit.TestKit, conn server.MockConn, param any) { + cmd := getExecuteBytes(param.(uint32), false, false, paramInfo{value: 1, isNull: false}) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + }, + // Skip this case. Refer to https://github.com/pingcap/tidb/issues/35784. 
+ //{ + // // update privilege after prepare + // setFunc: func(tk *testkit.TestKit, conn server.MockConn) any { + // rootTk := testkit.NewTestKit(t, store) + // rootTk.MustExec(`CREATE USER 'u1'@'localhost'`) + // rootTk.MustExec("create table test.t1(id int)") + // require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "u1", Hostname: "localhost"}, nil, nil)) + // rootTk.MustExec(`GRANT SELECT ON test.t1 TO 'u1'@'localhost'`) + // tk.MustExec("prepare stmt from 'select * from test.t1'") + // rootTk.MustExec(`REVOKE SELECT ON test.t1 FROM 'u1'@'localhost'`) + // return nil + // }, + // prepareFunc: func(tk *testkit.TestKit, conn server.MockConn) { + // require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "u1", Hostname: "localhost"}, nil, nil)) + // }, + // restoreErr: errno.ErrNoSuchTable, + // cleanFunc: func(tk *testkit.TestKit) { + // rootTk := testkit.NewTestKit(t, store) + // rootTk.MustExec("drop user 'u1'@'localhost'") + // rootTk.MustExec("drop table test.t1") + // }, + //}, + } + + for _, tt := range tests { + conn1 := server.CreateMockConn(t, sv) + tk1 := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) + var param any + if tt.setFunc != nil { + param = tt.setFunc(tk1, conn1) + } + conn2 := server.CreateMockConn(t, sv) + tk2 := testkit.NewTestKitWithSession(t, store, conn2.Context().Session) + rows := tk1.MustQuery("show session_states").Rows() + require.Len(t, rows, 1) + state := rows[0][0].(string) + state = strings.ReplaceAll(state, "\\", "\\\\") + state = strings.ReplaceAll(state, "'", "\\'") + setSQL := fmt.Sprintf("set session_states '%s'", state) + if tt.restoreErr != 0 { + tk2.MustGetErrCode(setSQL, tt.restoreErr) + } else { + tk2.MustExec(setSQL) + tt.checkFunc(tk2, conn2, param) + } + if tt.cleanFunc != nil { + tt.cleanFunc(tk1) + } + conn1.Close() + conn2.Close() + } +} + +func TestSQLBinding(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("create table test.t1(id int primary key, name varchar(10), key(name))") + + tests := []struct { + setFunc func(tk *testkit.TestKit) any + checkFunc func(tk *testkit.TestKit, param any) + restoreErr int + cleanFunc func(tk *testkit.TestKit) + }{ + { + // no bindings + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show session bindings").Check(testkit.Rows()) + }, + }, + { + // use binding and drop it + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + rows := tk.MustQuery("show session bindings").Rows() + require.Equal(t, 1, len(rows)) + return rows + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show session bindings").Check(param.([][]any)) + require.True(t, tk.HasPlan("select * from test.t1", "IndexFullScan")) + tk.MustExec("drop session binding for select * from test.t1") + tk.MustQuery("show session bindings").Check(testkit.Rows()) + }, + }, + { + // use hint + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select /*+ use_index(test.t1, name) */ * from test.t1") + rows := tk.MustQuery("show session bindings").Rows() + require.Equal(t, 1, len(rows)) + return rows + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show session bindings").Check(param.([][]any)) + require.True(t, tk.HasPlan("select * from test.t1", "IndexFullScan")) + tk.MustExec("drop session binding for select * from 
test.t1") + tk.MustQuery("show session bindings").Check(testkit.Rows()) + }, + }, + { + // drop binding + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + tk.MustExec("drop session binding for select * from test.t1") + return nil + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show session bindings").Check(testkit.Rows()) + }, + }, + { + // default db + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("use test") + tk.MustExec("create session binding for select * from t1 using select * from t1 use index(name)") + tk.MustExec("use mysql") + rows := tk.MustQuery("show session bindings").Rows() + require.Equal(t, 1, len(rows)) + return rows + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show session bindings").Check(param.([][]any)) + require.True(t, tk.HasPlan("select * from test.t1", "IndexFullScan")) + }, + }, + { + // drop table + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + tk.MustExec("drop table test.t1") + return nil + }, + restoreErr: errno.ErrNoSuchTable, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("create table test.t1(id int primary key, name varchar(10), key(name))") + }, + }, + { + // drop db + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create database test1") + tk.MustExec("use test1") + tk.MustExec("create table t1(id int primary key, name varchar(10), key(name))") + tk.MustExec("create session binding for select * from t1 using select /*+ use_index(t1, name) */ * from t1") + tk.MustExec("drop database test1") + return nil + }, + restoreErr: errno.ErrNoSuchTable, + }, + { + // alter the table + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + tk.MustExec("alter table test.t1 drop index name") + return nil + }, + restoreErr: errno.ErrKeyDoesNotExist, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("alter table test.t1 add index name(name)") + }, + }, + { + // both global and session bindings + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create global binding for select * from test.t1 using select * from test.t1 use index(primary)") + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + sessionRows := tk.MustQuery("show bindings").Rows() + require.Equal(t, 1, len(sessionRows)) + globalRows := tk.MustQuery("show global bindings").Rows() + require.Equal(t, 1, len(globalRows)) + return [][][]any{sessionRows, globalRows} + }, + checkFunc: func(tk *testkit.TestKit, param any) { + rows := param.([][][]any) + tk.MustQuery("show bindings").Check(rows[0]) + tk.MustQuery("show global bindings").Check(rows[1]) + require.True(t, tk.HasPlan("select * from test.t1", "IndexFullScan")) + tk.MustExec("drop session binding for select * from test.t1") + require.True(t, tk.HasPlan("select * from test.t1", "TableFullScan")) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop global binding for select * from test.t1") + }, + }, + { + // multiple bindings + setFunc: func(tk *testkit.TestKit) any { + tk.MustExec("create session binding for select * from test.t1 using select * from test.t1 use index(name)") + tk.MustExec("create session binding for select count(*) from test.t1 using select count(*) from test.t1 use 
index(primary)") + tk.MustExec("create session binding for select name from test.t1 using select name from test.t1 use index(primary)") + rows := tk.MustQuery("show bindings").Rows() + require.Equal(t, 3, len(rows)) + return rows + }, + checkFunc: func(tk *testkit.TestKit, param any) { + tk.MustQuery("show bindings").Check(param.([][]any)) + require.True(t, tk.HasPlan("select * from test.t1", "IndexFullScan")) + }, + }, + } + + for _, tt := range tests { + tk1 := testkit.NewTestKit(t, store) + var param any + if tt.setFunc != nil { + param = tt.setFunc(tk1) + } + rows := tk1.MustQuery("show session_states").Rows() + require.Len(t, rows, 1) + state := rows[0][0].(string) + state = strconv.Quote(state) + setSQL := fmt.Sprintf("set session_states %s", state) + tk2 := testkit.NewTestKit(t, store) + if tt.restoreErr != 0 { + tk2.MustGetErrCode(setSQL, tt.restoreErr) + } else { + tk2.MustExec(setSQL) + tt.checkFunc(tk2, param) + } + if tt.cleanFunc != nil { + tt.cleanFunc(tk1) + } + } +} + +func TestShowStateFail(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + sv := server.CreateMockServer(t, store) + defer sv.Close() + + tests := []struct { + setFunc func(tk *testkit.TestKit, conn server.MockConn) + showErr int + cleanFunc func(tk *testkit.TestKit) + }{ + { + // in an active transaction + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("begin") + }, + showErr: errno.ErrCannotMigrateSession, + }, + { + // out of transaction + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("begin") + tk.MustExec("commit") + }, + }, + { + // created a global temporary table + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create global temporary table test.t1(id int) on commit delete rows") + tk.MustExec("insert into test.t1 value(1)") + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // created a local temporary table + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create temporary table test.t1(id int)") + }, + showErr: errno.ErrCannotMigrateSession, + }, + { + // drop the local temporary table + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create temporary table test.t1(id int)") + tk.MustExec("drop table test.t1") + }, + }, + { + // hold and advisory lock + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustQuery("SELECT get_lock('testlock1', 0)").Check(testkit.Rows("1")) + }, + showErr: errno.ErrCannotMigrateSession, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustQuery("SELECT release_lock('testlock1')").Check(testkit.Rows("1")) + }, + }, + { + // release the advisory lock + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustQuery("SELECT get_lock('testlock1', 0)").Check(testkit.Rows("1")) + tk.MustQuery("SELECT release_lock('testlock1')").Check(testkit.Rows("1")) + }, + }, + { + // hold table locks + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("lock tables test.t1 write") + tk.MustQuery("show warnings").Check(testkit.Rows()) + }, + showErr: errno.ErrCannotMigrateSession, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // unlock the tables + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("lock tables test.t1 write") + tk.MustExec("unlock tables") + }, + cleanFunc: func(tk 
*testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // after COM_STMT_SEND_LONG_DATA + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select ?")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getLongDataBytes(1, 0, []byte("abc")) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + showErr: errno.ErrCannotMigrateSession, + }, + { + // after COM_STMT_SEND_LONG_DATA and COM_STMT_EXECUTE + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select ?")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getLongDataBytes(1, 0, []byte("abc")) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, false, true, paramInfo{value: 1, isNull: false}) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + }, + { + // query with cursor, and data is not fetched + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert test.t1 value(1), (2), (3)") + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select * from test.t1")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, true, false) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + showErr: errno.ErrCannotMigrateSession, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // fetched all the data but the EOF packet is not sent + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert test.t1 value(1), (2), (3)") + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select * from test.t1")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, true, false) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getFetchBytes(1, 10) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + showErr: errno.ErrCannotMigrateSession, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // EOF is sent + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert test.t1 value(1), (2), (3)") + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select * from test.t1")...) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, true, false) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getFetchBytes(1, 10) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + // This COM_STMT_FETCH returns EOF. + cmd = getFetchBytes(1, 10) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + { + // statement is reset + setFunc: func(tk *testkit.TestKit, conn server.MockConn) { + tk.MustExec("create table test.t1(id int)") + tk.MustExec("insert test.t1 value(1), (2), (3)") + cmd := append([]byte{mysql.ComStmtPrepare}, []byte("select * from test.t1")...) 
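+ // Raw commands are [command byte][payload]: ComStmtPrepare carries the SQL text,
+ // the ComStmtExecute packet built below sets the cursor flag, and ComStmtReset
+ // then discards that cursor so the session becomes migratable again.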
+ require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getExecuteBytes(1, true, false) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + cmd = getResetBytes(1) + require.NoError(t, conn.Dispatch(context.Background(), cmd)) + }, + cleanFunc: func(tk *testkit.TestKit) { + tk.MustExec("drop table test.t1") + }, + }, + } + + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.EnableTableLock = true + }) + for _, tt := range tests { + conn1 := server.CreateMockConn(t, sv) + tk1 := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) + tt.setFunc(tk1, conn1) + if tt.showErr == 0 { + tk2 := testkit.NewTestKit(t, store) + showSessionStatesAndSet(t, tk1, tk2) + } else { + err := tk1.QueryToErr("show session_states") + errEqualsCode(t, err, tt.showErr) + } + if tt.cleanFunc != nil { + tt.cleanFunc(tk1) + } + conn1.Close() + } +} + func showSessionStatesAndSet(t *testing.T, tk1, tk2 *testkit.TestKit) { rows := tk1.MustQuery("show session_states").Rows() require.Len(t, rows, 1) @@ -562,3 +1397,96 @@ func showSessionStatesAndSet(t *testing.T, tk1, tk2 *testkit.TestKit) { setSQL := fmt.Sprintf("set session_states %s", state) tk2.MustExec(setSQL) } + +func errEqualsCode(t *testing.T, err error, code int) { + require.NotNil(t, err) + originErr := errors.Cause(err) + tErr, ok := originErr.(*terror.Error) + require.True(t, ok) + sqlErr := terror.ToSQLError(tErr) + require.Equal(t, code, int(sqlErr.Code)) +} + +// create bytes for COM_STMT_SEND_LONG_DATA +func getLongDataBytes(stmtID uint32, paramID uint16, param []byte) []byte { + buf := make([]byte, 7+len(param)) + pos := 0 + buf[pos] = mysql.ComStmtSendLongData + pos++ + binary.LittleEndian.PutUint32(buf[pos:], stmtID) + pos += 4 + binary.LittleEndian.PutUint16(buf[pos:], paramID) + pos += 2 + buf = append(buf[:pos], param...) + return buf +} + +type paramInfo struct { + value uint32 + isNull bool +} + +// create bytes for COM_STMT_EXECUTE. It only supports int type for convenience. +func getExecuteBytes(stmtID uint32, useCursor bool, newParam bool, params ...paramInfo) []byte { + nullBitmapLen := (len(params) + 7) >> 3 + buf := make([]byte, 11+nullBitmapLen+len(params)*6) + pos := 0 + buf[pos] = mysql.ComStmtExecute + pos++ + binary.LittleEndian.PutUint32(buf[pos:], stmtID) + pos += 4 + if useCursor { + buf[pos] = 1 + } + pos++ + binary.LittleEndian.PutUint32(buf[pos:], 1) + pos += 4 + for i, param := range params { + if param.isNull { + buf[pos+(i>>3)] |= 1 << (i % 8) + } + } + pos += nullBitmapLen + if newParam { + buf[pos] = 1 + pos++ + for i := 0; i < len(params); i++ { + buf[pos] = mysql.TypeLong + pos++ + buf[pos] = 0 + pos++ + } + } else { + buf[pos] = 0 + pos++ + } + for _, param := range params { + if !param.isNull { + binary.LittleEndian.PutUint32(buf[pos:], param.value) + pos += 4 + } + } + return buf[:pos] +} + +// create bytes for COM_STMT_FETCH. +func getFetchBytes(stmtID, fetchSize uint32) []byte { + buf := make([]byte, 9) + pos := 0 + buf[pos] = mysql.ComStmtFetch + pos++ + binary.LittleEndian.PutUint32(buf[pos:], stmtID) + pos += 4 + binary.LittleEndian.PutUint32(buf[pos:], fetchSize) + return buf +} + +// create bytes for COM_STMT_FETCH. 
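+// getResetBytes builds a COM_STMT_RESET command, which discards any data accumulated
+// via COM_STMT_SEND_LONG_DATA and closes the cursor opened for the given statement ID.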
+func getResetBytes(stmtID uint32) []byte { + buf := make([]byte, 5) + pos := 0 + buf[pos] = mysql.ComStmtReset + pos++ + binary.LittleEndian.PutUint32(buf[pos:], stmtID) + return buf +} diff --git a/sessionctx/stmtctx/BUILD.bazel b/sessionctx/stmtctx/BUILD.bazel index 168b86f32cf62..b949bacb35267 100644 --- a/sessionctx/stmtctx/BUILD.bazel +++ b/sessionctx/stmtctx/BUILD.bazel @@ -10,14 +10,17 @@ go_library( "//parser/ast", "//parser/model", "//parser/mysql", + "//parser/terror", "//util/disk", "//util/execdetails", "//util/memory", "//util/resourcegrouptag", "//util/topsql/stmtstats", "//util/tracing", + "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//tikvrpc", "@com_github_tikv_client_go_v2//util", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], @@ -25,6 +28,7 @@ go_library( go_test( name = "stmtctx_test", + timeout = "short", srcs = [ "main_test.go", "stmtctx_test.go", @@ -32,9 +36,11 @@ go_test( embed = [":stmtctx"], deps = [ "//kv", + "//sessionctx/variable", "//testkit", "//testkit/testsetup", "//util/execdetails", + "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//util", "@org_uber_go_goleak//:goleak", diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 4d623015492cc..7fef92ee10782 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -17,7 +17,6 @@ package stmtctx import ( "encoding/json" "math" - "sort" "strconv" "sync" "sync/atomic" @@ -39,6 +38,7 @@ import ( "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -109,6 +109,7 @@ type StatementContext struct { // IsDDLJobInQueue is used to mark whether the DDL job is put into the queue. // If IsDDLJobInQueue is true, it means the DDL job is in the queue of storage, and it can be handled by the DDL worker. 
IsDDLJobInQueue bool + DDLJobID int64 InInsertStmt bool InUpdateStmt bool InDeleteStmt bool @@ -839,15 +840,15 @@ func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) - sort.Slice(sc.mu.allExecDetails, func(i, j int) bool { - return sc.mu.allExecDetails[i].TimeDetail.ProcessTime < sc.mu.allExecDetails[j].TimeDetail.ProcessTime + slices.SortFunc(sc.mu.allExecDetails, func(i, j *execdetails.ExecDetails) bool { + return i.TimeDetail.ProcessTime < j.TimeDetail.ProcessTime }) d.P90ProcessTime = sc.mu.allExecDetails[n*9/10].TimeDetail.ProcessTime d.MaxProcessTime = sc.mu.allExecDetails[n-1].TimeDetail.ProcessTime d.MaxProcessAddress = sc.mu.allExecDetails[n-1].CalleeAddress - sort.Slice(sc.mu.allExecDetails, func(i, j int) bool { - return sc.mu.allExecDetails[i].TimeDetail.WaitTime < sc.mu.allExecDetails[j].TimeDetail.WaitTime + slices.SortFunc(sc.mu.allExecDetails, func(i, j *execdetails.ExecDetails) bool { + return i.TimeDetail.WaitTime < j.TimeDetail.WaitTime }) d.P90WaitTime = sc.mu.allExecDetails[n*9/10].TimeDetail.WaitTime d.MaxWaitTime = sc.mu.allExecDetails[n-1].TimeDetail.WaitTime @@ -873,8 +874,8 @@ func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { if len(items) == 0 { continue } - sort.Slice(items, func(i, j int) bool { - return items[i].sleepTime < items[j].sleepTime + slices.SortFunc(items, func(i, j backoffItem) bool { + return i.sleepTime < j.sleepTime }) n := len(items) d.MaxBackoffAddress[backoff] = items[n-1].callee diff --git a/sessionctx/variable/BUILD.bazel b/sessionctx/variable/BUILD.bazel index c1bfa81623030..5d5863d660f72 100644 --- a/sessionctx/variable/BUILD.bazel +++ b/sessionctx/variable/BUILD.bazel @@ -58,12 +58,14 @@ go_library( "@com_github_tikv_client_go_v2//tikv", "@com_github_twmb_murmur3//:murmur3", "@org_golang_x_exp//maps", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", ], ) go_test( name = "variable_test", + timeout = "short", srcs = [ "main_test.go", "mock_globalaccessor_test.go", diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 12546cde3c0ad..04e1279e37410 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -23,7 +23,6 @@ import ( "math" "math/rand" "net" - "sort" "strconv" "strings" "sync" @@ -59,6 +58,7 @@ import ( "github.com/twmb/murmur3" atomic2 "go.uber.org/atomic" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // PreparedStmtCount is exported for test. @@ -1158,6 +1158,13 @@ type SessionVars struct { // MaxAllowedPacket indicates the maximum size of a packet for the MySQL protocol. MaxAllowedPacket uint64 + + // TiFlash related optimization, only for MPP. + TiFlashFineGrainedShuffleStreamCount int64 + TiFlashFineGrainedShuffleBatchSize uint64 + + // RequestSourceType is the type of inner request. + RequestSourceType string } // InitStatementContext initializes a StatementContext, the object is reused to reduce allocation. @@ -1684,6 +1691,11 @@ func (s *SessionVars) GetNextPreparedStmtID() uint32 { return s.preparedStmtID } +// SetNextPreparedStmtID sets the next prepared statement id. It's only used in restoring session states. +func (s *SessionVars) SetNextPreparedStmtID(preparedStmtID uint32) { + s.preparedStmtID = preparedStmtID +} + // Location returns the value of time_zone session variable. If it is nil, then return time.Local. 
func (s *SessionVars) Location() *time.Location { loc := s.TimeZone @@ -2414,7 +2426,7 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { for backoff := range logItems.CopTasks.TotBackoffTimes { backoffs = append(backoffs, backoff) } - sort.Strings(backoffs) + slices.Sort(backoffs) if logItems.CopTasks.NumCopTasks == 1 { buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v", diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 9fade168cea65..4af4de4e12837 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -213,29 +213,6 @@ var defaultSysVars = []*SysVar{ s.EnableOuterJoinReorder = TiDBOptOn(val) return nil }}, - {Scope: ScopeSession, Name: TiDBLogFileMaxDays, Value: strconv.Itoa(config.GetGlobalConfig().Log.File.MaxDays), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt32, skipInit: true, SetSession: func(s *SessionVars, val string) error { - maxAge, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return err - } - - GlobalLogMaxDays.Store(int32(maxAge)) - - cfg := config.GetGlobalConfig().Log.ToLogConfig() - cfg.Config.File.MaxDays = int(maxAge) - - err = logutil.ReplaceLogger(cfg) - if err != nil { - return err - } - - return nil - }, GetSession: func(s *SessionVars) (string, error) { - return strconv.FormatInt(int64(GlobalLogMaxDays.Load()), 10), nil - }}, - {Scope: ScopeSession, Name: TiDBConfig, Value: "", ReadOnly: true, skipInit: true, GetSession: func(s *SessionVars) (string, error) { - return config.GetJSONConfig() - }}, {Scope: ScopeSession, Name: TiDBDDLReorgPriority, Value: "PRIORITY_LOW", Type: TypeEnum, skipInit: true, PossibleValues: []string{"PRIORITY_LOW", "PRIORITY_NORMAL", "PRIORITY_HIGH"}, SetSession: func(s *SessionVars, val string) error { s.setDDLReorgPriority(val) return nil @@ -348,6 +325,25 @@ var defaultSysVars = []*SysVar{ }}, /* The system variables below have INSTANCE scope */ + {Scope: ScopeInstance, Name: TiDBLogFileMaxDays, Value: strconv.Itoa(config.GetGlobalConfig().Log.File.MaxDays), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt32, SetGlobal: func(s *SessionVars, val string) error { + maxAge, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return err + } + GlobalLogMaxDays.Store(int32(maxAge)) + cfg := config.GetGlobalConfig().Log.ToLogConfig() + cfg.Config.File.MaxDays = int(maxAge) + err = logutil.ReplaceLogger(cfg) + if err != nil { + return err + } + return nil + }, GetGlobal: func(s *SessionVars) (string, error) { + return strconv.FormatInt(int64(GlobalLogMaxDays.Load()), 10), nil + }}, + {Scope: ScopeInstance, Name: TiDBConfig, Value: "", ReadOnly: true, GetGlobal: func(s *SessionVars) (string, error) { + return config.GetJSONConfig() + }}, {Scope: ScopeInstance, Name: TiDBGeneralLog, Value: BoolToOnOff(DefTiDBGeneralLog), Type: TypeBool, SetGlobal: func(s *SessionVars, val string) error { ProcessGeneralLog.Store(TiDBOptOn(val)) return nil @@ -1662,6 +1658,16 @@ var defaultSysVars = []*SysVar{ return nil }, }, + {Scope: ScopeGlobal | ScopeSession, Name: TiFlashFineGrainedShuffleStreamCount, Value: strconv.Itoa(DefTiFlashFineGrainedShuffleStreamCount), Type: TypeInt, MinValue: -1, MaxValue: 1024, + SetSession: func(s *SessionVars, val string) error { + s.TiFlashFineGrainedShuffleStreamCount = TidbOptInt64(val, DefTiFlashFineGrainedShuffleStreamCount) + return nil + }}, + {Scope: ScopeGlobal | ScopeSession, Name: TiFlashFineGrainedShuffleBatchSize, Value: strconv.Itoa(DefTiFlashFineGrainedShuffleBatchSize), Type: TypeUnsigned, 
MinValue: 1, MaxValue: math.MaxUint64, + SetSession: func(s *SessionVars, val string) error { + s.TiFlashFineGrainedShuffleBatchSize = uint64(TidbOptInt64(val, DefTiFlashFineGrainedShuffleBatchSize)) + return nil + }}, {Scope: ScopeGlobal, Name: TiDBSimplifiedMetrics, Value: BoolToOnOff(DefTiDBSimplifiedMetrics), Type: TypeBool, SetGlobal: func(vars *SessionVars, s string) error { metrics.ToggleSimplifiedMode(TiDBOptOn(s)) diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go index d101f90b7df78..6d94cb81e8ac0 100644 --- a/sessionctx/variable/sysvar_test.go +++ b/sessionctx/variable/sysvar_test.go @@ -536,13 +536,8 @@ func TestIsNoop(t *testing.T) { require.True(t, sv.IsNoop) } -func TestInstanceScopedVars(t *testing.T) { - // This tests instance scoped variables through GetSessionOrGlobalSystemVar(). - // Eventually these should be changed to use getters so that the switch - // statement in GetSessionOnlySysVars can be removed. - +func TestSessionGetterFuncs(t *testing.T) { vars := NewSessionVars() - val, err := GetSessionOrGlobalSystemVar(vars, TiDBCurrentTS) require.NoError(t, err) require.Equal(t, fmt.Sprintf("%d", vars.TxnCtx.StartTS), val) @@ -557,7 +552,22 @@ func TestInstanceScopedVars(t *testing.T) { require.NoError(t, err) require.Equal(t, string(info), val) - val, err = GetSessionOrGlobalSystemVar(vars, TiDBGeneralLog) + val, err = GetSessionOrGlobalSystemVar(vars, TiDBFoundInPlanCache) + require.NoError(t, err) + require.Equal(t, BoolToOnOff(vars.PrevFoundInPlanCache), val) + + val, err = GetSessionOrGlobalSystemVar(vars, TiDBFoundInBinding) + require.NoError(t, err) + require.Equal(t, BoolToOnOff(vars.PrevFoundInBinding), val) + + val, err = GetSessionOrGlobalSystemVar(vars, TiDBTxnScope) + require.NoError(t, err) + require.Equal(t, vars.TxnScope.GetVarValue(), val) +} + +func TestInstanceScopedVars(t *testing.T) { + vars := NewSessionVars() + val, err := GetSessionOrGlobalSystemVar(vars, TiDBGeneralLog) require.NoError(t, err) require.Equal(t, BoolToOnOff(ProcessGeneralLog.Load()), val) @@ -610,21 +620,19 @@ func TestInstanceScopedVars(t *testing.T) { require.NoError(t, err) require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.CheckMb4ValueInUTF8.Load()), val) - val, err = GetSessionOrGlobalSystemVar(vars, TiDBFoundInPlanCache) + val, err = GetSessionOrGlobalSystemVar(vars, TiDBEnableCollectExecutionInfo) require.NoError(t, err) - require.Equal(t, BoolToOnOff(vars.PrevFoundInPlanCache), val) + require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.EnableCollectExecutionInfo), val) - val, err = GetSessionOrGlobalSystemVar(vars, TiDBFoundInBinding) + val, err = GetSessionOrGlobalSystemVar(vars, TiDBConfig) require.NoError(t, err) - require.Equal(t, BoolToOnOff(vars.PrevFoundInBinding), val) - - val, err = GetSessionOrGlobalSystemVar(vars, TiDBEnableCollectExecutionInfo) + expected, err = config.GetJSONConfig() require.NoError(t, err) - require.Equal(t, BoolToOnOff(config.GetGlobalConfig().Instance.EnableCollectExecutionInfo), val) + require.Equal(t, expected, val) - val, err = GetSessionOrGlobalSystemVar(vars, TiDBTxnScope) + val, err = GetSessionOrGlobalSystemVar(vars, TiDBLogFileMaxDays) require.NoError(t, err) - require.Equal(t, vars.TxnScope.GetVarValue(), val) + require.Equal(t, fmt.Sprint(GlobalLogMaxDays.Load()), val) } // TestDefaultValuesAreSettable that sysvars defaults are logically valid. i.e. 
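The ScopeInstance migration above (TiDBLogFileMaxDays and TiDBConfig) follows a single pattern: the variable is backed by process-global state and wired up through SetGlobal/GetGlobal closures instead of a session-scoped definition with skipInit. A minimal sketch of that shape, using a hypothetical variable name and assuming it sits inside the sessionctx/variable package with math, strconv and go.uber.org/atomic imported:

// exampleMaxAge is hypothetical process-global state, analogous to GlobalLogMaxDays above.
var exampleMaxAge = atomic.NewInt32(0)

// exampleInstanceVar is illustrative only and not part of this change.
var exampleInstanceVar = &SysVar{
	Scope: ScopeInstance,
	Name:  "tidb_example_max_age", // hypothetical name
	Value: "0",
	Type:  TypeInt, MinValue: 0, MaxValue: math.MaxInt32,
	SetGlobal: func(s *SessionVars, val string) error {
		v, err := strconv.ParseInt(val, 10, 32)
		if err != nil {
			return err
		}
		exampleMaxAge.Store(int32(v)) // affects only this tidb-server process
		return nil
	},
	GetGlobal: func(s *SessionVars) (string, error) {
		return strconv.FormatInt(int64(exampleMaxAge.Load()), 10), nil
	},
}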
@@ -648,6 +656,14 @@ func TestDefaultValuesAreSettable(t *testing.T) { } } +// TestSysVarNameIsLowerCase tests that no new sysvars are added with uppercase characters. +// In MySQL variables are always lowercase, and can be set in a case-insensitive way. +func TestSysVarNameIsLowerCase(t *testing.T) { + for _, sv := range GetSysVars() { + require.Equal(t, strings.ToLower(sv.Name), sv.Name, "sysvar name contains uppercase characters") + } +} + // TestSettersandGetters tests that sysvars are logically correct with getter and setter functions. // i.e. it doesn't make sense to have a SetSession function on a variable that is only globally scoped. func TestSettersandGetters(t *testing.T) { diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 529c31b6e560d..2e55dfdb2353d 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -678,6 +678,10 @@ const ( // When set to true, a non-transactional DML finishes all batches even if errors are met in some batches. TiDBNonTransactionalIgnoreError = "tidb_nontransactional_ignore_error" + // Fine grained shuffle is disabled when TiFlashFineGrainedShuffleStreamCount is zero. + TiFlashFineGrainedShuffleStreamCount = "tiflash_fine_grained_shuffle_stream_count" + TiFlashFineGrainedShuffleBatchSize = "tiflash_fine_grained_shuffle_batch_size" + // TiDBSimplifiedMetrics controls whether to unregister some unused metrics. TiDBSimplifiedMetrics = "tidb_simplified_metrics" ) @@ -752,196 +756,199 @@ const ( // Default TiDB system variable values. const ( - DefHostname = "localhost" - DefIndexLookupConcurrency = ConcurrencyUnset - DefIndexLookupJoinConcurrency = ConcurrencyUnset - DefIndexSerialScanConcurrency = 1 - DefIndexJoinBatchSize = 25000 - DefIndexLookupSize = 20000 - DefDistSQLScanConcurrency = 15 - DefBuildStatsConcurrency = 4 - DefAutoAnalyzeRatio = 0.5 - DefAutoAnalyzeStartTime = "00:00 +0000" - DefAutoAnalyzeEndTime = "23:59 +0000" - DefAutoIncrementIncrement = 1 - DefAutoIncrementOffset = 1 - DefChecksumTableConcurrency = 4 - DefSkipUTF8Check = false - DefSkipASCIICheck = false - DefOptAggPushDown = false - DefOptCartesianBCJ = 1 - DefOptMPPOuterJoinFixedBuildSide = false - DefOptWriteRowID = false - DefOptEnableCorrelationAdjustment = true - DefOptLimitPushDownThreshold = 100 - DefOptCorrelationThreshold = 0.9 - DefOptCorrelationExpFactor = 1 - DefOptCPUFactor = 3.0 - DefOptCopCPUFactor = 3.0 - DefOptTiFlashConcurrencyFactor = 24.0 - DefOptNetworkFactor = 1.0 - DefOptScanFactor = 1.5 - DefOptDescScanFactor = 3.0 - DefOptSeekFactor = 20.0 - DefOptMemoryFactor = 0.001 - DefOptDiskFactor = 1.5 - DefOptConcurrencyFactor = 3.0 - DefOptCPUFactorV2 = 30.0 - DefOptCopCPUFactorV2 = 30.0 - DefOptTiFlashCPUFactorV2 = 2.0 - DefOptNetworkFactorV2 = 4.0 - DefOptScanFactorV2 = 100.0 - DefOptDescScanFactorV2 = 150.0 - DefOptTiFlashScanFactorV2 = 15.0 - DefOptSeekFactorV2 = 9500000.0 - DefOptMemoryFactorV2 = 0.001 - DefOptDiskFactorV2 = 1.5 - DefOptConcurrencyFactorV2 = 3.0 - DefOptInSubqToJoinAndAgg = true - DefOptPreferRangeScan = false - DefBatchInsert = false - DefBatchDelete = false - DefBatchCommit = false - DefCurretTS = 0 - DefInitChunkSize = 32 - DefMaxChunkSize = 1024 - DefDMLBatchSize = 0 - DefMaxPreparedStmtCount = -1 - DefWaitTimeout = 28800 - DefTiDBMemQuotaApplyCache = 32 << 20 // 32MB. - DefTiDBMemQuotaBindingCache = 64 << 20 // 64MB. 
- DefTiDBGeneralLog = false - DefTiDBPProfSQLCPU = 0 - DefTiDBRetryLimit = 10 - DefTiDBDisableTxnAutoRetry = true - DefTiDBConstraintCheckInPlace = false - DefTiDBHashJoinConcurrency = ConcurrencyUnset - DefTiDBProjectionConcurrency = ConcurrencyUnset - DefBroadcastJoinThresholdSize = 100 * 1024 * 1024 - DefBroadcastJoinThresholdCount = 10 * 1024 - DefTiDBOptimizerSelectivityLevel = 0 - DefTiDBOptimizerEnableNewOFGB = false - DefTiDBEnableOuterJoinReorder = true - DefTiDBAllowBatchCop = 1 - DefTiDBAllowMPPExecution = true - DefTiDBHashExchangeWithNewCollation = true - DefTiDBEnforceMPPExecution = false - DefTiFlashMaxThreads = -1 - DefTiDBMPPStoreFailTTL = "60s" - DefTiDBTxnMode = "" - DefTiDBRowFormatV1 = 1 - DefTiDBRowFormatV2 = 2 - DefTiDBDDLReorgWorkerCount = 4 - DefTiDBDDLReorgBatchSize = 256 - DefTiDBDDLErrorCountLimit = 512 - DefTiDBMaxDeltaSchemaCount = 1024 - DefTiDBChangeMultiSchema = false - DefTiDBPointGetCache = false - DefTiDBPlacementMode = PlacementModeStrict - DefTiDBEnableAutoIncrementInGenerated = false - DefTiDBHashAggPartialConcurrency = ConcurrencyUnset - DefTiDBHashAggFinalConcurrency = ConcurrencyUnset - DefTiDBWindowConcurrency = ConcurrencyUnset - DefTiDBMergeJoinConcurrency = 1 // disable optimization by default - DefTiDBStreamAggConcurrency = 1 - DefTiDBForcePriority = mysql.NoPriority - DefEnableWindowFunction = true - DefEnablePipelinedWindowFunction = true - DefEnableStrictDoubleTypeCheck = true - DefEnableVectorizedExpression = true - DefTiDBOptJoinReorderThreshold = 0 - DefTiDBDDLSlowOprThreshold = 300 - DefTiDBUseFastAnalyze = false - DefTiDBSkipIsolationLevelCheck = false - DefTiDBExpensiveQueryTimeThreshold = 60 // 60s - DefTiDBScatterRegion = false - DefTiDBWaitSplitRegionFinish = true - DefWaitSplitRegionTimeout = 300 // 300s - DefTiDBEnableNoopFuncs = Off - DefTiDBEnableNoopVariables = true - DefTiDBAllowRemoveAutoInc = false - DefTiDBUsePlanBaselines = true - DefTiDBEvolvePlanBaselines = false - DefTiDBEvolvePlanTaskMaxTime = 600 // 600s - DefTiDBEvolvePlanTaskStartTime = "00:00 +0000" - DefTiDBEvolvePlanTaskEndTime = "23:59 +0000" - DefInnodbLockWaitTimeout = 50 // 50s - DefTiDBStoreLimit = 0 - DefTiDBMetricSchemaStep = 60 // 60s - DefTiDBMetricSchemaRangeDuration = 60 // 60s - DefTiDBFoundInPlanCache = false - DefTiDBFoundInBinding = false - DefTiDBEnableCollectExecutionInfo = true - DefTiDBAllowAutoRandExplicitInsert = false - DefTiDBEnableClusteredIndex = ClusteredIndexDefModeIntOnly - DefTiDBRedactLog = false - DefTiDBRestrictedReadOnly = false - DefTiDBSuperReadOnly = false - DefTiDBShardAllocateStep = math.MaxInt64 - DefTiDBEnableTelemetry = true - DefTiDBEnableParallelApply = false - DefTiDBEnableAmendPessimisticTxn = false - DefTiDBPartitionPruneMode = "static" - DefTiDBEnableRateLimitAction = true - DefTiDBEnableAsyncCommit = false - DefTiDBEnable1PC = false - DefTiDBGuaranteeLinearizability = true - DefTiDBAnalyzeVersion = 2 - DefTiDBEnableIndexMergeJoin = false - DefTiDBTrackAggregateMemoryUsage = true - DefTiDBEnableExchangePartition = false - DefCTEMaxRecursionDepth = 1000 - DefTiDBTmpTableMaxSize = 64 << 20 // 64MB. 
- DefTiDBEnableLocalTxn = false - DefTiDBTSOClientBatchMaxWaitTime = 0.0 // 0ms - DefTiDBEnableTSOFollowerProxy = false - DefTiDBEnableOrderedResultMode = false - DefTiDBEnablePseudoForOutdatedStats = true - DefTiDBRegardNULLAsPoint = true - DefEnablePlacementCheck = true - DefTimestamp = "0" - DefTiDBEnableStmtSummary = true - DefTiDBStmtSummaryInternalQuery = false - DefTiDBStmtSummaryRefreshInterval = 1800 - DefTiDBStmtSummaryHistorySize = 24 - DefTiDBStmtSummaryMaxStmtCount = 3000 - DefTiDBStmtSummaryMaxSQLLength = 4096 - DefTiDBCapturePlanBaseline = Off - DefTiDBEnableIndexMerge = true - DefEnableLegacyInstanceScope = true - DefTiDBTableCacheLease = 3 // 3s - DefTiDBPersistAnalyzeOptions = true - DefTiDBEnableColumnTracking = false - DefTiDBStatsLoadSyncWait = 0 - DefTiDBStatsLoadPseudoTimeout = false - DefSysdateIsNow = false - DefTiDBEnableMutationChecker = false - DefTiDBTxnAssertionLevel = AssertionOffStr - DefTiDBIgnorePreparedCacheCloseStmt = false - DefTiDBBatchPendingTiFlashCount = 4000 - DefRCReadCheckTS = false - DefTiDBRemoveOrderbyInSubquery = false - DefTiDBReadStaleness = 0 - DefTiDBGCMaxWaitTime = 24 * 60 * 60 - DefMaxAllowedPacket uint64 = 67108864 - DefTiDBEnableBatchDML = false - DefTiDBMemQuotaQuery = 1073741824 // 1GB - DefTiDBStatsCacheMemQuota = 0 - MaxTiDBStatsCacheMemQuota = 1024 * 1024 * 1024 * 1024 // 1TB - DefTiDBQueryLogMaxLen = 4096 - DefRequireSecureTransport = false - DefTiDBCommitterConcurrency = 128 - DefTiDBBatchDMLIgnoreError = false - DefTiDBMemQuotaAnalyze = -1 - DefTiDBEnableAutoAnalyze = true - DefTiDBMemOOMAction = "CANCEL" - DefTiDBMaxAutoAnalyzeTime = 12 * 60 * 60 - DefTiDBEnablePrepPlanCache = true - DefTiDBPrepPlanCacheSize = 100 - DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1 - DefTiDBEnableConcurrentDDL = true - DefTiDBSimplifiedMetrics = false - DefTiDBEnablePaging = true + DefHostname = "localhost" + DefIndexLookupConcurrency = ConcurrencyUnset + DefIndexLookupJoinConcurrency = ConcurrencyUnset + DefIndexSerialScanConcurrency = 1 + DefIndexJoinBatchSize = 25000 + DefIndexLookupSize = 20000 + DefDistSQLScanConcurrency = 15 + DefBuildStatsConcurrency = 4 + DefAutoAnalyzeRatio = 0.5 + DefAutoAnalyzeStartTime = "00:00 +0000" + DefAutoAnalyzeEndTime = "23:59 +0000" + DefAutoIncrementIncrement = 1 + DefAutoIncrementOffset = 1 + DefChecksumTableConcurrency = 4 + DefSkipUTF8Check = false + DefSkipASCIICheck = false + DefOptAggPushDown = false + DefOptCartesianBCJ = 1 + DefOptMPPOuterJoinFixedBuildSide = false + DefOptWriteRowID = false + DefOptEnableCorrelationAdjustment = true + DefOptLimitPushDownThreshold = 100 + DefOptCorrelationThreshold = 0.9 + DefOptCorrelationExpFactor = 1 + DefOptCPUFactor = 3.0 + DefOptCopCPUFactor = 3.0 + DefOptTiFlashConcurrencyFactor = 24.0 + DefOptNetworkFactor = 1.0 + DefOptScanFactor = 1.5 + DefOptDescScanFactor = 3.0 + DefOptSeekFactor = 20.0 + DefOptMemoryFactor = 0.001 + DefOptDiskFactor = 1.5 + DefOptConcurrencyFactor = 3.0 + DefOptCPUFactorV2 = 30.0 + DefOptCopCPUFactorV2 = 30.0 + DefOptTiFlashCPUFactorV2 = 2.0 + DefOptNetworkFactorV2 = 4.0 + DefOptScanFactorV2 = 100.0 + DefOptDescScanFactorV2 = 150.0 + DefOptTiFlashScanFactorV2 = 15.0 + DefOptSeekFactorV2 = 9500000.0 + DefOptMemoryFactorV2 = 0.001 + DefOptDiskFactorV2 = 1.5 + DefOptConcurrencyFactorV2 = 3.0 + DefOptInSubqToJoinAndAgg = true + DefOptPreferRangeScan = false + DefBatchInsert = false + DefBatchDelete = false + DefBatchCommit = false + DefCurretTS = 0 + DefInitChunkSize = 32 + DefMaxChunkSize = 1024 + DefDMLBatchSize = 0 + DefMaxPreparedStmtCount = 
-1 + DefWaitTimeout = 28800 + DefTiDBMemQuotaApplyCache = 32 << 20 // 32MB. + DefTiDBMemQuotaBindingCache = 64 << 20 // 64MB. + DefTiDBGeneralLog = false + DefTiDBPProfSQLCPU = 0 + DefTiDBRetryLimit = 10 + DefTiDBDisableTxnAutoRetry = true + DefTiDBConstraintCheckInPlace = false + DefTiDBHashJoinConcurrency = ConcurrencyUnset + DefTiDBProjectionConcurrency = ConcurrencyUnset + DefBroadcastJoinThresholdSize = 100 * 1024 * 1024 + DefBroadcastJoinThresholdCount = 10 * 1024 + DefTiDBOptimizerSelectivityLevel = 0 + DefTiDBOptimizerEnableNewOFGB = false + DefTiDBEnableOuterJoinReorder = true + DefTiDBAllowBatchCop = 1 + DefTiDBAllowMPPExecution = true + DefTiDBHashExchangeWithNewCollation = true + DefTiDBEnforceMPPExecution = false + DefTiFlashMaxThreads = -1 + DefTiDBMPPStoreFailTTL = "60s" + DefTiDBTxnMode = "" + DefTiDBRowFormatV1 = 1 + DefTiDBRowFormatV2 = 2 + DefTiDBDDLReorgWorkerCount = 4 + DefTiDBDDLReorgBatchSize = 256 + DefTiDBDDLErrorCountLimit = 512 + DefTiDBMaxDeltaSchemaCount = 1024 + DefTiDBChangeMultiSchema = false + DefTiDBPointGetCache = false + DefTiDBPlacementMode = PlacementModeStrict + DefTiDBEnableAutoIncrementInGenerated = false + DefTiDBHashAggPartialConcurrency = ConcurrencyUnset + DefTiDBHashAggFinalConcurrency = ConcurrencyUnset + DefTiDBWindowConcurrency = ConcurrencyUnset + DefTiDBMergeJoinConcurrency = 1 // disable optimization by default + DefTiDBStreamAggConcurrency = 1 + DefTiDBForcePriority = mysql.NoPriority + DefEnableWindowFunction = true + DefEnablePipelinedWindowFunction = true + DefEnableStrictDoubleTypeCheck = true + DefEnableVectorizedExpression = true + DefTiDBOptJoinReorderThreshold = 0 + DefTiDBDDLSlowOprThreshold = 300 + DefTiDBUseFastAnalyze = false + DefTiDBSkipIsolationLevelCheck = false + DefTiDBExpensiveQueryTimeThreshold = 60 // 60s + DefTiDBScatterRegion = false + DefTiDBWaitSplitRegionFinish = true + DefWaitSplitRegionTimeout = 300 // 300s + DefTiDBEnableNoopFuncs = Off + DefTiDBEnableNoopVariables = true + DefTiDBAllowRemoveAutoInc = false + DefTiDBUsePlanBaselines = true + DefTiDBEvolvePlanBaselines = false + DefTiDBEvolvePlanTaskMaxTime = 600 // 600s + DefTiDBEvolvePlanTaskStartTime = "00:00 +0000" + DefTiDBEvolvePlanTaskEndTime = "23:59 +0000" + DefInnodbLockWaitTimeout = 50 // 50s + DefTiDBStoreLimit = 0 + DefTiDBMetricSchemaStep = 60 // 60s + DefTiDBMetricSchemaRangeDuration = 60 // 60s + DefTiDBFoundInPlanCache = false + DefTiDBFoundInBinding = false + DefTiDBEnableCollectExecutionInfo = true + DefTiDBAllowAutoRandExplicitInsert = false + DefTiDBEnableClusteredIndex = ClusteredIndexDefModeIntOnly + DefTiDBRedactLog = false + DefTiDBRestrictedReadOnly = false + DefTiDBSuperReadOnly = false + DefTiDBShardAllocateStep = math.MaxInt64 + DefTiDBEnableTelemetry = true + DefTiDBEnableParallelApply = false + DefTiDBEnableAmendPessimisticTxn = false + DefTiDBPartitionPruneMode = "static" + DefTiDBEnableRateLimitAction = true + DefTiDBEnableAsyncCommit = false + DefTiDBEnable1PC = false + DefTiDBGuaranteeLinearizability = true + DefTiDBAnalyzeVersion = 2 + DefTiDBEnableIndexMergeJoin = false + DefTiDBTrackAggregateMemoryUsage = true + DefTiDBEnableExchangePartition = false + DefCTEMaxRecursionDepth = 1000 + DefTiDBTmpTableMaxSize = 64 << 20 // 64MB. 
+ DefTiDBEnableLocalTxn = false + DefTiDBTSOClientBatchMaxWaitTime = 0.0 // 0ms + DefTiDBEnableTSOFollowerProxy = false + DefTiDBEnableOrderedResultMode = false + DefTiDBEnablePseudoForOutdatedStats = true + DefTiDBRegardNULLAsPoint = true + DefEnablePlacementCheck = true + DefTimestamp = "0" + DefTiDBEnableStmtSummary = true + DefTiDBStmtSummaryInternalQuery = false + DefTiDBStmtSummaryRefreshInterval = 1800 + DefTiDBStmtSummaryHistorySize = 24 + DefTiDBStmtSummaryMaxStmtCount = 3000 + DefTiDBStmtSummaryMaxSQLLength = 4096 + DefTiDBCapturePlanBaseline = Off + DefTiDBEnableIndexMerge = true + DefEnableLegacyInstanceScope = true + DefTiDBTableCacheLease = 3 // 3s + DefTiDBPersistAnalyzeOptions = true + DefTiDBEnableColumnTracking = false + DefTiDBStatsLoadSyncWait = 0 + DefTiDBStatsLoadPseudoTimeout = false + DefSysdateIsNow = false + DefTiDBEnableMutationChecker = false + DefTiDBTxnAssertionLevel = AssertionOffStr + DefTiDBIgnorePreparedCacheCloseStmt = false + DefTiDBBatchPendingTiFlashCount = 4000 + DefRCReadCheckTS = false + DefTiDBRemoveOrderbyInSubquery = false + DefTiDBReadStaleness = 0 + DefTiDBGCMaxWaitTime = 24 * 60 * 60 + DefMaxAllowedPacket uint64 = 67108864 + DefTiDBEnableBatchDML = false + DefTiDBMemQuotaQuery = 1073741824 // 1GB + DefTiDBStatsCacheMemQuota = 0 + MaxTiDBStatsCacheMemQuota = 1024 * 1024 * 1024 * 1024 // 1TB + DefTiDBQueryLogMaxLen = 4096 + DefRequireSecureTransport = false + DefTiDBCommitterConcurrency = 128 + DefTiDBBatchDMLIgnoreError = false + DefTiDBMemQuotaAnalyze = -1 + DefTiDBEnableAutoAnalyze = true + DefTiDBMemOOMAction = "CANCEL" + DefTiDBMaxAutoAnalyzeTime = 12 * 60 * 60 + DefTiDBEnablePrepPlanCache = true + DefTiDBPrepPlanCacheSize = 100 + DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1 + DefTiDBEnableConcurrentDDL = true + DefTiDBSimplifiedMetrics = false + DefTiDBEnablePaging = true + DefTiFlashFineGrainedShuffleStreamCount = -1 + DefStreamCountWhenMaxThreadsNotSet = 8 + DefTiFlashFineGrainedShuffleBatchSize = 8192 ) // Process global variables. diff --git a/sessionctx/variable/variable.go b/sessionctx/variable/variable.go index 8a882f1d6e4f2..0266255c1e857 100644 --- a/sessionctx/variable/variable.go +++ b/sessionctx/variable/variable.go @@ -122,7 +122,8 @@ type SysVar struct { SetGlobal func(*SessionVars, string) error // IsHintUpdatable indicate whether it's updatable via SET_VAR() hint (optional) IsHintUpdatable bool - // Hidden means that it still responds to SET but doesn't show up in SHOW VARIABLES + // Deprecated: Hidden previously meant that the variable still responds to SET but doesn't show up in SHOW VARIABLES + // However, this feature is no longer used. All variables are visble. Hidden bool // Aliases is a list of sysvars that should also be updated when this sysvar is updated. // Updating aliases calls the SET function of the aliases, but does not update their aliases (preventing SET recursion) diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index 38108e5cba932..b65ddae858023 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -16,7 +16,6 @@ package variable import ( "fmt" - "sort" "strconv" "strings" "sync/atomic" @@ -30,6 +29,7 @@ import ( "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/timeutil" "github.com/tikv/client-go/v2/oracle" + "golang.org/x/exp/slices" ) // secondsPerYear represents seconds in a normal year. Leap year is not considered here. 
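The sort-related hunks in this patch are the same mechanical rewrite: index-based sort.Slice/sort.Strings calls become the generic helpers from golang.org/x/exp/slices, which compare elements directly (the x/exp version pinned here still takes a less-style comparison function, as the stmtctx.go hunks show). A small self-contained illustration of the equivalent forms:

package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/slices"
)

type copTask struct{ wait int }

func main() {
	tasks := []copTask{{3}, {1}, {2}}

	// Index-based form being removed by this patch.
	sort.Slice(tasks, func(i, j int) bool { return tasks[i].wait < tasks[j].wait })

	// Generic element-based form being introduced; no index bookkeeping needed.
	slices.SortFunc(tasks, func(a, b copTask) bool { return a.wait < b.wait })

	names := []string{"b", "a"}
	slices.Sort(names) // replaces sort.Strings(names)

	fmt.Println(tasks, names)
}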
@@ -522,7 +522,7 @@ func collectAllowFuncName4ExpressionIndex() string { for funcName := range GAFunction4ExpressionIndex { str = append(str, funcName) } - sort.Strings(str) + slices.Sort(str) return strings.Join(str, ", ") } diff --git a/sessiontxn/BUILD.bazel b/sessiontxn/BUILD.bazel index bdb84de22657f..0738636e8299a 100644 --- a/sessiontxn/BUILD.bazel +++ b/sessiontxn/BUILD.bazel @@ -11,10 +11,14 @@ go_library( visibility = ["//visibility:public"], deps = [ "//infoschema", + "//kv", "//parser/ast", "//sessionctx", + "//sessionctx/variable", + "//table/temptable", "//util/stringutil", "@com_github_opentracing_opentracing_go//:opentracing-go", + "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_tikv_client_go_v2//oracle", ], ) diff --git a/sessiontxn/interface.go b/sessiontxn/interface.go index d1febc88c8a48..60795aca52fd4 100644 --- a/sessiontxn/interface.go +++ b/sessiontxn/interface.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/sessionctx" ) @@ -116,19 +117,29 @@ type TxnContextProvider interface { TxnAdvisable // GetTxnInfoSchema returns the information schema used by txn GetTxnInfoSchema() infoschema.InfoSchema - // GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) + // GetTxnScope returns the current txn scope + GetTxnScope() string + // GetReadReplicaScope returns the read replica scope + GetReadReplicaScope() string + //GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) GetStmtReadTS() (uint64, error) // GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... for update GetStmtForUpdateTS() (uint64, error) + // GetSnapshotWithStmtReadTS gets snapshot with read ts + GetSnapshotWithStmtReadTS() (kv.Snapshot, error) + // GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts + GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) // OnInitialize is the hook that should be called when enter a new txn with this provider OnInitialize(ctx context.Context, enterNewTxnType EnterNewTxnType) error // OnStmtStart is the hook that should be called when a new statement started - OnStmtStart(ctx context.Context) error + OnStmtStart(ctx context.Context, node ast.StmtNode) error // OnStmtErrorForNextAction is the hook that should be called when a new statement get an error OnStmtErrorForNextAction(point StmtErrorHandlePoint, err error) (StmtErrorAction, error) // OnStmtRetry is the hook that should be called when a statement is retried internally. OnStmtRetry(ctx context.Context) error + // ActivateTxn activates the transaction. + ActivateTxn() (kv.Transaction, error) } // TxnManager is an interface providing txn context management in session @@ -138,19 +149,27 @@ type TxnManager interface { // If the session is not in any transaction, for example: between two autocommit statements, // this method will return the latest information schema in session that is same with `sessionctx.GetDomainInfoSchema()` GetTxnInfoSchema() infoschema.InfoSchema + // GetTxnScope returns the current txn scope + GetTxnScope() string + // GetReadReplicaScope returns the read replica scope + GetReadReplicaScope() string // GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) GetStmtReadTS() (uint64, error) // GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... 
for update GetStmtForUpdateTS() (uint64, error) // GetContextProvider returns the current TxnContextProvider GetContextProvider() TxnContextProvider + // GetSnapshotWithStmtReadTS gets snapshot with read ts + GetSnapshotWithStmtReadTS() (kv.Snapshot, error) + // GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts + GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) // EnterNewTxn enters a new transaction. EnterNewTxn(ctx context.Context, req *EnterNewTxnRequest) error // OnTxnEnd is the hook that should be called after transaction commit or rollback OnTxnEnd() // OnStmtStart is the hook that should be called when a new statement started - OnStmtStart(ctx context.Context) error + OnStmtStart(ctx context.Context, node ast.StmtNode) error // OnStmtErrorForNextAction is the hook that should be called when a new statement get an error // This method is not required to be called for every error in the statement, // it is only required to be called for some errors handled in some specified points given by the parameter `point`. @@ -158,6 +177,10 @@ type TxnManager interface { OnStmtErrorForNextAction(point StmtErrorHandlePoint, err error) (StmtErrorAction, error) // OnStmtRetry is the hook that should be called when a statement retry OnStmtRetry(ctx context.Context) error + // ActivateTxn activates the transaction. + ActivateTxn() (kv.Transaction, error) + // GetCurrentStmt returns the current statement node + GetCurrentStmt() ast.StmtNode } // NewTxn starts a new optimistic and active txn, it can be used for the below scenes: @@ -178,7 +201,8 @@ func NewTxnInStmt(ctx context.Context, sctx sessionctx.Context) error { if err := NewTxn(ctx, sctx); err != nil { return err } - return GetTxnManager(sctx).OnStmtStart(ctx) + txnManager := GetTxnManager(sctx) + return txnManager.OnStmtStart(ctx, txnManager.GetCurrentStmt()) } // GetTxnManager returns the TxnManager object from session context diff --git a/sessiontxn/isolation/BUILD.bazel b/sessiontxn/isolation/BUILD.bazel index 35fb911f4b636..a05b08583768a 100644 --- a/sessiontxn/isolation/BUILD.bazel +++ b/sessiontxn/isolation/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/pingcap/tidb/sessiontxn/isolation", visibility = ["//visibility:public"], deps = [ + "//config", "//infoschema", "//kv", "//parser/ast", @@ -22,6 +23,7 @@ go_library( "//sessionctx/variable", "//sessiontxn", "//sessiontxn/staleread", + "//table/temptable", "//util/logutil", "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//error", @@ -48,15 +50,20 @@ go_test( "//parser", "//parser/ast", "//planner", + "//session", "//sessionctx", "//sessiontxn", "//testkit", + "//testkit/testfork", "//testkit/testsetup", + "//types", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//error", "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_goleak//:goleak", ], ) diff --git a/sessiontxn/isolation/base.go b/sessiontxn/isolation/base.go index d5c3bcbca7ab1..877adebd564ca 100644 --- a/sessiontxn/isolation/base.go +++ b/sessiontxn/isolation/base.go @@ -19,12 +19,15 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" 
"github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/staleread" + "github.com/pingcap/tidb/table/temptable" "github.com/tikv/client-go/v2/oracle" ) @@ -42,15 +45,16 @@ type baseTxnContextProvider struct { sctx sessionctx.Context causalConsistencyOnly bool onInitializeTxnCtx func(*variable.TransactionContext) - onTxnActive func(kv.Transaction) + onTxnActive func(kv.Transaction, sessiontxn.EnterNewTxnType) getStmtReadTSFunc func() (uint64, error) getStmtForUpdateTSFunc func() (uint64, error) // Runtime states - ctx context.Context - infoSchema infoschema.InfoSchema - txn kv.Transaction - isTxnPrepared bool + ctx context.Context + infoSchema infoschema.InfoSchema + txn kv.Transaction + isTxnPrepared bool + enterNewTxnType sessiontxn.EnterNewTxnType } // OnInitialize is the hook that should be called when enter a new txn with this provider @@ -59,6 +63,7 @@ func (p *baseTxnContextProvider) OnInitialize(ctx context.Context, tp sessiontxn return errors.New("ts functions should not be nil") } + p.ctx = ctx sessVars := p.sctx.GetSessionVars() activeNow := true switch tp { @@ -79,11 +84,8 @@ func (p *baseTxnContextProvider) OnInitialize(ctx context.Context, tp sessiontxn return errors.Errorf("Unsupported type: %v", tp) } - p.ctx = ctx - // For normal `sessionctx.Context` the `GetDomainInfoSchema` should always return a non-nil value with type `infoschema.InfoSchema` - // However for some test cases we are using `mock.Context` which will return nil for this method, - // so we use `p.infoSchema, _ = ...` to avoid panic in test cases - p.infoSchema, _ = p.sctx.GetDomainInfoSchema().(infoschema.InfoSchema) + p.enterNewTxnType = tp + p.infoSchema = p.sctx.GetDomainInfoSchema().(infoschema.InfoSchema) txnCtx := &variable.TransactionContext{ TxnCtxNoNeedToRestore: variable.TxnCtxNoNeedToRestore{ CreateTime: time.Now(), @@ -101,9 +103,9 @@ func (p *baseTxnContextProvider) OnInitialize(ctx context.Context, tp sessiontxn if err != nil { return err } - p.isTxnPrepared = txn.Valid() || p.sctx.GetPreparedTSFuture() != nil + p.isTxnPrepared = txn.Valid() || p.sctx.GetPreparedTxnFuture() != nil if activeNow { - _, err = p.activateTxn() + _, err = p.ActivateTxn() } return err @@ -116,8 +118,27 @@ func (p *baseTxnContextProvider) GetTxnInfoSchema() infoschema.InfoSchema { return p.infoSchema } +func (p *baseTxnContextProvider) GetTxnScope() string { + return p.sctx.GetSessionVars().TxnCtx.TxnScope +} + +func (p *baseTxnContextProvider) GetReadReplicaScope() string { + if txnScope := p.GetTxnScope(); txnScope != kv.GlobalTxnScope && txnScope != "" { + // In local txn, we should use txnScope as the readReplicaScope + return txnScope + } + + if p.sctx.GetSessionVars().GetReplicaRead().IsClosestRead() { + // If closest read is set, we should use the scope where instance located. 
+ return config.GetTxnScopeFromConfig() + } + + // When it is not a local txn or closest read, we should use global scope + return kv.GlobalReplicaScope + } + func (p *baseTxnContextProvider) GetStmtReadTS() (uint64, error) { - if _, err := p.activateTxn(); err != nil { + if _, err := p.ActivateTxn(); err != nil { return 0, err } @@ -128,7 +149,7 @@ func (p *baseTxnContextProvider) GetStmtReadTS() (uint64, error) { } func (p *baseTxnContextProvider) GetStmtForUpdateTS() (uint64, error) { - if _, err := p.activateTxn(); err != nil { + if _, err := p.ActivateTxn(); err != nil { return 0, err } @@ -138,7 +159,7 @@ func (p *baseTxnContextProvider) GetStmtForUpdateTS() (uint64, error) { return p.getStmtForUpdateTSFunc() } -func (p *baseTxnContextProvider) OnStmtStart(ctx context.Context) error { +func (p *baseTxnContextProvider) OnStmtStart(ctx context.Context, _ ast.StmtNode) error { p.ctx = ctx return nil } @@ -159,14 +180,14 @@ func (p *baseTxnContextProvider) OnStmtErrorForNextAction(point sessiontxn.StmtE } func (p *baseTxnContextProvider) getTxnStartTS() (uint64, error) { - txn, err := p.activateTxn() + txn, err := p.ActivateTxn() if err != nil { return 0, err } return txn.StartTS(), nil } -func (p *baseTxnContextProvider) activateTxn() (kv.Transaction, error) { +func (p *baseTxnContextProvider) ActivateTxn() (kv.Transaction, error) { if p.txn != nil { return p.txn, nil } @@ -175,7 +196,12 @@ func (p *baseTxnContextProvider) activateTxn() (kv.Transaction, error) { return nil, err } - txn, err := p.sctx.Txn(true) + txnFuture := p.sctx.GetPreparedTxnFuture() + if txnFuture == nil { + return nil, errors.AddStack(kv.ErrInvalidTxn) + } + + txn, err := txnFuture.Wait(p.ctx, p.sctx) if err != nil { return nil, err } @@ -183,12 +209,38 @@ func (p *baseTxnContextProvider) activateTxn() (kv.Transaction, error) { sessVars := p.sctx.GetSessionVars() sessVars.TxnCtx.StartTS = txn.StartTS() + if p.enterNewTxnType == sessiontxn.EnterNewTxnBeforeStmt && !sessVars.IsAutocommit() && sessVars.SnapshotTS == 0 { + sessVars.SetInTxn(true) + } + + txn.SetVars(sessVars.KVVars) + + readReplicaType := sessVars.GetReplicaRead() + if readReplicaType.IsFollowerRead() { + txn.SetOption(kv.ReplicaRead, readReplicaType) + } + txn.SetOption(kv.SnapInterceptor, temptable.SessionSnapshotInterceptor(p.sctx)) + + if sessVars.StmtCtx.WeakConsistency { + txn.SetOption(kv.IsolationLevel, kv.RC) + } + + sessiontxn.SetTxnAssertionLevel(txn, sessVars.AssertionLevel) + if p.causalConsistencyOnly { txn.SetOption(kv.GuaranteeLinearizability, false) } if p.onTxnActive != nil { - p.onTxnActive(txn) + p.onTxnActive(txn, p.enterNewTxnType) + } + + if p.sctx.GetSessionVars().InRestrictedSQL { + txn.SetOption(kv.RequestSourceInternal, true) + } + + if tp := p.sctx.GetSessionVars().RequestSourceType; tp != "" { + txn.SetOption(kv.RequestSourceType, tp) } p.txn = txn @@ -255,3 +307,47 @@ func (p *baseTxnContextProvider) AdviseWarmup() error { func (p *baseTxnContextProvider) AdviseOptimizeWithPlan(_ interface{}) error { return nil } + +// GetSnapshotWithStmtReadTS gets snapshot with read ts +func (p *baseTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { + ts, err := p.GetStmtReadTS() + if err != nil { + return nil, err + } + + return p.getSnapshotByTS(ts) +} + +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts +func (p *baseTxnContextProvider) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { + ts, err := p.GetStmtForUpdateTS() + if err != nil { + return nil, err + } + + return p.getSnapshotByTS(ts) +}
+ +// getSnapshotByTS get snapshot from store according to the snapshotTS and set the transaction related +// options before return +func (p *baseTxnContextProvider) getSnapshotByTS(snapshotTS uint64) (kv.Snapshot, error) { + txn, err := p.sctx.Txn(false) + if err != nil { + return nil, err + } + + txnCtx := p.sctx.GetSessionVars().TxnCtx + if txn.Valid() && txnCtx.StartTS == txnCtx.GetForUpdateTS() && txnCtx.StartTS == snapshotTS { + return txn.GetSnapshot(), nil + } + + sessVars := p.sctx.GetSessionVars() + snapshot := sessiontxn.GetSnapshotWithTS(p.sctx, snapshotTS) + + replicaReadType := sessVars.GetReplicaRead() + if replicaReadType.IsFollowerRead() && !sessVars.StmtCtx.RCCheckTS { + snapshot.SetOption(kv.ReplicaRead, replicaReadType) + } + + return snapshot, nil +} diff --git a/sessiontxn/isolation/main_test.go b/sessiontxn/isolation/main_test.go index 4c04d03243363..be85f098e31b6 100644 --- a/sessiontxn/isolation/main_test.go +++ b/sessiontxn/isolation/main_test.go @@ -16,21 +16,27 @@ package isolation_test import ( "context" + "fmt" "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/pingcap/tidb/testkit/testsetup" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" "go.uber.org/goleak" ) func TestMain(m *testing.M) { testsetup.SetupForCommonTest() + tikv.EnableFailpoints() opts := []goleak.Option{ goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), @@ -39,7 +45,7 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m, opts...) 
} -func getOracleTS(t *testing.T, sctx sessionctx.Context) uint64 { +func getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 { ts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) require.NoError(t, err) return ts @@ -57,7 +63,7 @@ type txnAssert[T sessiontxn.TxnContextProvider] struct { couldRetry bool } -func (a *txnAssert[T]) Check(t *testing.T) { +func (a *txnAssert[T]) Check(t testing.TB) { provider := sessiontxn.GetTxnManager(a.sctx).GetContextProvider() sessVars := a.sctx.GetSessionVars() txnCtx := sessVars.TxnCtx @@ -76,6 +82,9 @@ func (a *txnAssert[T]) Check(t *testing.T) { require.Equal(t, a.inTxn, sessVars.InTxn()) require.Equal(t, a.inTxn, txnCtx.IsExplicit) require.Equal(t, a.couldRetry, txnCtx.CouldRetry) + require.Equal(t, assertTxnScope, txnCtx.TxnScope) + require.Equal(t, assertTxnScope, provider.GetTxnScope()) + require.Equal(t, assertReplicaReadScope, provider.GetReadReplicaScope()) txn, err := a.sctx.Txn(false) require.NoError(t, err) @@ -111,7 +120,53 @@ func activeSnapshotTxnAssert(sctx sessionctx.Context, ts uint64, isolation strin } } -func (a *txnAssert[T]) CheckAndGetProvider(t *testing.T) T { +func (a *txnAssert[T]) CheckAndGetProvider(t testing.TB) T { a.Check(t) return sessiontxn.GetTxnManager(a.sctx).GetContextProvider().(T) } + +var assertTxnScope = kv.GlobalTxnScope +var assertReplicaReadScope = kv.GlobalReplicaScope + +func forkScopeSettings(t *testfork.T, store kv.Storage) func() { + tk := testkit.NewTestKit(t, store) + failPointEnabled := false + clearFunc := func() { + assertTxnScope = kv.GlobalTxnScope + assertReplicaReadScope = kv.GlobalReplicaScope + tk.MustExec("set @@global.tidb_replica_read='leader'") + tk.MustExec("set @@global.tidb_enable_local_txn=0") + if failPointEnabled { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + } + } + + clearFunc() + success := false + defer func() { + if !success { + clearFunc() + } + }() + + zone := testfork.PickEnum(t, "", "bj") + if zone != "" { + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, zone))) + failPointEnabled = true + if testfork.PickEnum(t, "", "enableLocalTxn") != "" { + tk.MustExec("set @@global.tidb_enable_local_txn=1") + assertTxnScope = zone + assertReplicaReadScope = zone + } + } + + if testfork.PickEnum(t, "", "closetRead") != "" { + tk.MustExec("set @@global.tidb_replica_read='closest-replicas'") + if zone != "" { + assertReplicaReadScope = zone + } + } + + success = true + return clearFunc +} diff --git a/sessiontxn/isolation/optimistic.go b/sessiontxn/isolation/optimistic.go index f5d56fdf5929e..2669bb302e0b6 100644 --- a/sessiontxn/isolation/optimistic.go +++ b/sessiontxn/isolation/optimistic.go @@ -21,6 +21,8 @@ import ( "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/util/logutil" "go.uber.org/zap" ) @@ -36,6 +38,10 @@ func NewOptimisticTxnContextProvider(sctx sessionctx.Context, causalConsistencyO baseTxnContextProvider: baseTxnContextProvider{ sctx: sctx, causalConsistencyOnly: causalConsistencyOnly, + onTxnActive: func(_ kv.Transaction, tp sessiontxn.EnterNewTxnType) { + sessVars := sctx.GetSessionVars() + sessVars.TxnCtx.CouldRetry = isOptimisticTxnRetryable(sessVars, tp) + }, }, } @@ -44,6 +50,45 @@ func NewOptimisticTxnContextProvider(sctx 
sessionctx.Context, causalConsistencyO return provider } +// isOptimisticTxnRetryable (if it returns true) means the transaction could retry. +// We only consider retry in this optimistic mode. +// If the session is already in a transaction, it could retry only when retry is enabled or the SQL is internal. +// If not, the transaction could always retry, because it should be an auto-committed transaction. +// In any case, if the retry limit is 0, the transaction could not retry. +func isOptimisticTxnRetryable(sessVars *variable.SessionVars, tp sessiontxn.EnterNewTxnType) bool { + if tp == sessiontxn.EnterNewTxnDefault { + return false + } + + // If retry limit is 0, the transaction could not retry. + if sessVars.RetryLimit == 0 { + return false + } + + // When `@@tidb_snapshot` is set, it is a read-only statement and will not cause the errors that should retry a transaction in optimistic mode. + if sessVars.SnapshotTS != 0 { + return false + } + + // If the session is not InTxn, it is an auto-committed transaction. + // The auto-committed transaction could always retry. + if !sessVars.InTxn() { + return true + } + + // The internal transaction could always retry. + if sessVars.InRestrictedSQL { + return true + } + + // If the retry is enabled, the transaction could retry. + if !sessVars.DisableTxnAutoRetry { + return true + } + + return false +} + // AdviseOptimizeWithPlan providers optimization according to the plan // It will use MaxTS as the startTS in autocommit txn for some plans. func (p *OptimisticTxnContextProvider) AdviseOptimizeWithPlan(plan interface{}) (err error) { diff --git a/sessiontxn/isolation/optimistic_test.go b/sessiontxn/isolation/optimistic_test.go index 25390dca1aa19..4d6c7f56fc23b 100644 --- a/sessiontxn/isolation/optimistic_test.go +++ b/sessiontxn/isolation/optimistic_test.go @@ -22,9 +22,8 @@ import ( "testing" "time" - "github.com/pingcap/tidb/config" - + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" @@ -34,6 +33,7 @@ import ( "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -49,7 +49,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { se := tk.Session() compareTS := getOracleTS(t, se) provider := initializeOptimisticProvider(t, tk, true) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) readTS, err := provider.GetStmtReadTS() require.NoError(t, err) updateTS, err := provider.GetStmtForUpdateTS() @@ -59,7 +59,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { compareTS = readTS // for optimistic mode ts, ts should be the same for all statements - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) readTS, err = provider.GetStmtReadTS() require.NoError(t, err) updateTS, err = provider.GetStmtForUpdateTS() @@ -72,7 +72,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { require.NoError(t, err) stmt := stmts[0] provider = initializeOptimisticProvider(t, tk, false) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) plan, _, err := planner.Optimize(context.TODO(), tk.Session(), stmt, provider.GetTxnInfoSchema()) require.NoError(t, err)
require.NoError(t, provider.AdviseOptimizeWithPlan(plan)) @@ -85,7 +85,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { // if the oracle future is prepared fist, `math.MaxUint64` should still be used after plan provider = initializeOptimisticProvider(t, tk, false) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) require.NoError(t, provider.AdviseWarmup()) plan, _, err = planner.Optimize(context.TODO(), tk.Session(), stmt, provider.GetTxnInfoSchema()) require.NoError(t, err) @@ -100,7 +100,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { // when it is in explicit txn, we should not use `math.MaxUint64` compareTS = getOracleTS(t, se) provider = initializeOptimisticProvider(t, tk, true) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) plan, _, err = planner.Optimize(context.TODO(), tk.Session(), stmt, provider.GetTxnInfoSchema()) require.NoError(t, err) require.NoError(t, provider.AdviseOptimizeWithPlan(plan)) @@ -115,7 +115,7 @@ func TestOptimisticTxnContextProviderTS(t *testing.T) { tk.MustExec("set @@autocommit=0") compareTS = getOracleTS(t, se) provider = initializeOptimisticProvider(t, tk, false) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) plan, _, err = planner.Optimize(context.TODO(), tk.Session(), stmt, provider.GetTxnInfoSchema()) require.NoError(t, err) require.NoError(t, provider.AdviseOptimizeWithPlan(plan)) @@ -175,7 +175,7 @@ func TestOptimisticHandleError(t *testing.T) { } for _, c := range cases { - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) action, err := provider.OnStmtErrorForNextAction(c.point, c.err) if c.point == sessiontxn.StmtErrAfterPessimisticLock { require.Error(t, err) @@ -183,7 +183,7 @@ func TestOptimisticHandleError(t *testing.T) { require.Equal(t, sessiontxn.StmtActionError, action) // next statement should not update ts - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkTS() } else { require.NoError(t, err) @@ -194,13 +194,13 @@ func TestOptimisticHandleError(t *testing.T) { checkTS() // OnStmtErrorForNextAction again - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) action, err = provider.OnStmtErrorForNextAction(c.point, c.err) require.NoError(t, err) require.Equal(t, sessiontxn.StmtActionNoIdea, action) // next statement should not update ts - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkTS() } } @@ -209,84 +209,76 @@ func TestOptimisticHandleError(t *testing.T) { func TestOptimisticProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - - tk := testkit.NewTestKit(t, store) - se := tk.Session() - - // begin outside a txn - assert := activeOptimisticTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = activeOptimisticTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin outside a txn when tidb_disable_txn_auto_retry=0 - tk.MustExec("set @@tidb_disable_txn_auto_retry=0") - tk.MustExec("rollback") - assert = activeOptimisticTxnAssert(t, se, true) - assert.couldRetry = true - 
tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeOptimisticTxnAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeOptimisticTxnAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Optimistic, - })) - assert.Check(t) - - tk.MustExec("rollback") - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - })) - assert.Check(t) - - // non-active txn and then active it - cases := []struct { - disableTxnAutoRetry bool - autocommit bool - }{ - { - true, true, - }, - { - true, false, - }, - { - false, true, - }, - { - false, false, - }, - } - for _, c := range cases { - t.Run(fmt.Sprintf("disableAutRetry: %v, autoCommit: %v", c.disableTxnAutoRetry, c.autocommit), func(t *testing.T) { - tk.MustExec("rollback") - defer tk.MustExec("rollback") - tk.MustExec(fmt.Sprintf("set @@autocommit=%v", c.autocommit)) - tk.MustExec(fmt.Sprintf("set @@tidb_disable_txn_auto_retry=%v", c.disableTxnAutoRetry)) - assert = inactiveOptimisticTxnAssert(se) - assertAfterActive := activeOptimisticTxnAssert(t, se, !c.autocommit) - assertAfterActive.couldRetry = c.autocommit || !c.disableTxnAutoRetry - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - }) - } + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + + // begin outside a txn + assert := activeOptimisticTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activeOptimisticTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin outside a txn when tidb_disable_txn_auto_retry=0 + tk.MustExec("set @@tidb_disable_txn_auto_retry=0") + tk.MustExec("rollback") + assert = activeOptimisticTxnAssert(t, se, true) + assert.couldRetry = true + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeOptimisticTxnAssert(t, se, true) + assert.causalConsistencyOnly = true + assert.couldRetry = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activeOptimisticTxnAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Optimistic, + })) + assert.Check(t) + + tk.MustExec("rollback") + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + })) + assert.Check(t) + + // non-active txn and then active it + disableTxnAutoRetry := true + if testfork.PickEnum(t, "enableTxnAutoRetry", "") != "" { + disableTxnAutoRetry = false + } + autocommit := true + if testfork.PickEnum(t, "noAutocommit", "") != "" { + 
autocommit = false + } + tk.MustExec("rollback") + defer tk.MustExec("rollback") + tk.MustExec(fmt.Sprintf("set @@autocommit=%v", autocommit)) + tk.MustExec(fmt.Sprintf("set @@tidb_disable_txn_auto_retry=%v", disableTxnAutoRetry)) + assert = inactiveOptimisticTxnAssert(se) + assertAfterActive := activeOptimisticTxnAssert(t, se, !autocommit) + assertAfterActive.couldRetry = autocommit || !disableTxnAutoRetry + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + }) } func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) { @@ -343,12 +335,12 @@ func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) { } // information schema and ts should equal to snapshot when tidb_snapshot is set - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() // information schema and ts will restore when set tidb_snapshot to empty tk.MustExec("set @@tidb_snapshot=''") - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseTxn() // txn will not be active after `GetStmtReadTS` or `GetStmtForUpdateTS` when `tidb_snapshot` is set @@ -368,14 +360,14 @@ func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) { assertAfterUseSnapshot := activeSnapshotTxnAssert(se, se.GetSessionVars().SnapshotTS, "") require.NoError(t, se.PrepareTxnCtx(context.TODO())) provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() assertAfterUseSnapshot.Check(t) }() } } -func activeOptimisticTxnAssert(t *testing.T, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.OptimisticTxnContextProvider] { +func activeOptimisticTxnAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.OptimisticTxnContextProvider] { return &txnAssert[*isolation.OptimisticTxnContextProvider]{ sctx: sctx, minStartTime: time.Now(), @@ -393,7 +385,7 @@ func inactiveOptimisticTxnAssert(sctx sessionctx.Context) *txnAssert[*isolation. 
} } -func initializeOptimisticProvider(t *testing.T, tk *testkit.TestKit, withExplicitBegin bool) *isolation.OptimisticTxnContextProvider { +func initializeOptimisticProvider(t testing.TB, tk *testkit.TestKit, withExplicitBegin bool) *isolation.OptimisticTxnContextProvider { tk.MustExec("commit") if withExplicitBegin { assert := activeOptimisticTxnAssert(t, tk.Session(), true) diff --git a/sessiontxn/isolation/readcommitted.go b/sessiontxn/isolation/readcommitted.go index 5fb316b59f8bf..06adc9fcba4fc 100644 --- a/sessiontxn/isolation/readcommitted.go +++ b/sessiontxn/isolation/readcommitted.go @@ -63,10 +63,13 @@ func NewPessimisticRCTxnContextProvider(sctx sessionctx.Context, causalConsisten txnCtx.IsPessimistic = true txnCtx.Isolation = ast.ReadCommitted }, + onTxnActive: func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) { + txn.SetOption(kv.Pessimistic, true) + }, }, } - provider.onTxnActive = func(txn kv.Transaction) { + provider.onTxnActive = func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) { txn.SetOption(kv.Pessimistic, true) provider.latestOracleTS = txn.StartTS() provider.latestOracleTSValid = true @@ -77,13 +80,30 @@ func NewPessimisticRCTxnContextProvider(sctx sessionctx.Context, causalConsisten } // OnStmtStart is the hook that should be called when a new statement started -func (p *PessimisticRCTxnContextProvider) OnStmtStart(ctx context.Context) error { - if err := p.baseTxnContextProvider.OnStmtStart(ctx); err != nil { +func (p *PessimisticRCTxnContextProvider) OnStmtStart(ctx context.Context, node ast.StmtNode) error { + if err := p.baseTxnContextProvider.OnStmtStart(ctx, node); err != nil { return err } + + // Try to mark the `RCCheckTS` flag for the first time execution of in-transaction read requests + // using read-consistency isolation level. + if node != nil && NeedSetRCCheckTSFlag(p.sctx, node) { + p.sctx.GetSessionVars().StmtCtx.RCCheckTS = true + } + return p.prepareStmt(!p.isTxnPrepared) } +// NeedSetRCCheckTSFlag checks whether it's needed to set `RCCheckTS` flag in current stmtctx. 
+func NeedSetRCCheckTSFlag(ctx sessionctx.Context, node ast.Node) bool { + sessionVars := ctx.GetSessionVars() + if sessionVars.ConnectionID > 0 && sessionVars.RcReadCheckTS && sessionVars.InTxn() && + !sessionVars.RetryInfo.Retrying && plannercore.IsReadOnly(node, sessionVars) { + return true + } + return false +} + // OnStmtErrorForNextAction is the hook that should be called when a new statement get an error func (p *PessimisticRCTxnContextProvider) OnStmtErrorForNextAction(point sessiontxn.StmtErrorHandlePoint, err error) (sessiontxn.StmtErrorAction, error) { switch point { @@ -144,7 +164,7 @@ func (p *PessimisticRCTxnContextProvider) getStmtTS() (ts uint64, err error) { } var txn kv.Transaction - if txn, err = p.activateTxn(); err != nil { + if txn, err = p.ActivateTxn(); err != nil { return 0, err } @@ -168,7 +188,7 @@ func (p *PessimisticRCTxnContextProvider) handleAfterQueryError(queryErr error) p.latestOracleTSValid = false logutil.Logger(p.ctx).Info("RC read with ts checking has failed, retry RC read", - zap.String("sql", sessVars.StmtCtx.OriginalSQL)) + zap.String("sql", sessVars.StmtCtx.OriginalSQL), zap.Error(queryErr)) return sessiontxn.RetryReady() } @@ -237,3 +257,17 @@ func (p *PessimisticRCTxnContextProvider) AdviseOptimizeWithPlan(val interface{} return nil } + +// GetSnapshotWithStmtReadTS gets snapshot with read ts +func (p *PessimisticRCTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { + snapshot, err := p.baseTxnContextProvider.GetSnapshotWithStmtForUpdateTS() + if err != nil { + return nil, err + } + + if p.sctx.GetSessionVars().StmtCtx.RCCheckTS { + snapshot.SetOption(kv.IsolationLevel, kv.RCCheckTS) + } + + return snapshot, nil +} diff --git a/sessiontxn/isolation/readcommitted_test.go b/sessiontxn/isolation/readcommitted_test.go index 5c747eba4fa2c..a0211d2cf83f0 100644 --- a/sessiontxn/isolation/readcommitted_test.go +++ b/sessiontxn/isolation/readcommitted_test.go @@ -29,10 +29,13 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" + "github.com/pingcap/tidb/types" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -55,26 +58,26 @@ func TestPessimisticRCTxnContextProviderRCCheck(t *testing.T) { forUpdateStmt := stmts[0] compareTS := se.GetSessionVars().TxnCtx.StartTS - // first ts should request from tso + // first ts should use the txn startTS require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err := provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, ts, compareTS) rcCheckTS := ts - // second ts should reuse first ts + // second ts should reuse the txn startTS require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, rcCheckTS, ts) // when one statement did not getStmtReadTS, the next one should still reuse the first ts require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, 
provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, rcCheckTS, ts) @@ -93,7 +96,7 @@ func TestPessimisticRCTxnContextProviderRCCheck(t *testing.T) { // if retry succeed next statement will still use rc check require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, rcCheckTS, ts) @@ -103,14 +106,14 @@ func TestPessimisticRCTxnContextProviderRCCheck(t *testing.T) { require.NoError(t, err) require.Equal(t, sessiontxn.StmtActionNoIdea, nextAction) require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, rcCheckTS, ts) // `StmtErrAfterPessimisticLock` will still disable rc check require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), readOnlyStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, rcCheckTS, ts) @@ -128,7 +131,7 @@ func TestPessimisticRCTxnContextProviderRCCheck(t *testing.T) { // only read-only stmt can retry for rc check require.NoError(t, executor.ResetContextOfStmt(se, forUpdateStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), forUpdateStmt)) ts, err = provider.GetStmtReadTS() require.NoError(t, err) require.Greater(t, ts, compareTS) @@ -137,6 +140,60 @@ func TestPessimisticRCTxnContextProviderRCCheck(t *testing.T) { require.Equal(t, sessiontxn.StmtActionNoIdea, nextAction) } +func TestPessimisticRCTxnContextProviderRCCheckForPrepareExecute(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk2.MustExec("use test") + tk.MustExec("create table t (id int primary key, v int)") + tk2.MustExec("insert into t values(1, 1)") + + tk.MustExec("set @@tidb_rc_read_check_ts=1") + se := tk.Session() + ctx := context.Background() + provider := initializePessimisticRCProvider(t, tk) + txnStartTS := se.GetSessionVars().TxnCtx.StartTS + + // first ts should use the txn startTS + stmt, _, _, err := tk.Session().PrepareStmt("select * from t") + require.NoError(t, err) + rs, err := tk.Session().ExecutePreparedStmt(ctx, stmt, []types.Datum{}) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1")) + require.NoError(t, err) + ts, err := provider.GetStmtForUpdateTS() + require.NoError(t, err) + require.Equal(t, txnStartTS, ts) + + // second ts should reuse the txn startTS + rs, err = tk.Session().ExecutePreparedStmt(ctx, stmt, []types.Datum{}) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1")) + require.NoError(t, err) + ts, err = provider.GetStmtForUpdateTS() + require.NoError(t, err) + require.Equal(t, txnStartTS, ts) + + 
tk2.MustExec("update t set v = v + 10 where id = 1") + compareTS := getOracleTS(t, se) + rs, err = tk.Session().ExecutePreparedStmt(ctx, stmt, []types.Datum{}) + require.NoError(t, err) + _, err = session.ResultSetToStringSlice(ctx, tk.Session(), rs) + require.Error(t, err) + ts, err = provider.GetStmtForUpdateTS() + require.NoError(t, err) + require.Greater(t, compareTS, ts) + // retry + tk.Session().GetSessionVars().RetryInfo.Retrying = true + rs, err = tk.Session().ExecutePreparedStmt(ctx, stmt, []types.Datum{}) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 11")) + ts, err = provider.GetStmtForUpdateTS() + require.NoError(t, err) + require.Greater(t, ts, compareTS) +} + func TestPessimisticRCTxnContextProviderLockError(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() @@ -155,7 +212,7 @@ func TestPessimisticRCTxnContextProviderLockError(t *testing.T) { &tikverr.ErrDeadlock{Deadlock: &kvrpcpb.Deadlock{}, IsRetryable: true}, } { require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) nextAction, err := provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.NoError(t, err) require.Equal(t, sessiontxn.StmtActionRetryReady, nextAction) @@ -167,7 +224,7 @@ func TestPessimisticRCTxnContextProviderLockError(t *testing.T) { errors.New("err"), } { require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) nextAction, err := provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.Same(t, lockErr, err) require.Equal(t, sessiontxn.StmtActionError, nextAction) @@ -189,7 +246,7 @@ func TestPessimisticRCTxnContextProviderTS(t *testing.T) { // first read require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) readTS, err := provider.GetStmtReadTS() require.NoError(t, err) forUpdateTS, err := provider.GetStmtForUpdateTS() @@ -202,7 +259,7 @@ func TestPessimisticRCTxnContextProviderTS(t *testing.T) { compareTS = getOracleTS(t, se) require.Greater(t, compareTS, readTS) require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), stmt)) readTS, err = provider.GetStmtReadTS() require.NoError(t, err) forUpdateTS, err = provider.GetStmtForUpdateTS() @@ -231,61 +288,66 @@ func TestRCProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outside a txn - assert := activeRCTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = activeRCTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeRCTxnAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeRCTxnAssert(t, se, 
false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactiveRCTxnAssert(se) - assertAfterActive := activeRCTxnAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactiveRCTxnAssert(se) - assertAfterActive = activeRCTxnAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outside a txn + assert := activeRCTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activeRCTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeRCTxnAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activeRCTxnAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then active it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactiveRCTxnAssert(se) + assertAfterActive := activeRCTxnAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = inactiveRCTxnAssert(se) + assertAfterActive = activeRCTxnAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInRC(t *testing.T) { @@ -346,12 +408,12 @@ func TestTidbSnapshotVarInRC(t *testing.T) { } // information schema and ts should equal to snapshot 
when tidb_snapshot is set - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() // information schema and ts will restore when set tidb_snapshot to empty tk.MustExec("set @@tidb_snapshot=''") - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseTxn(false) // txn will not be active after `GetStmtReadTS` or `GetStmtForUpdateTS` when `tidb_snapshot` is set @@ -372,7 +434,7 @@ func TestTidbSnapshotVarInRC(t *testing.T) { assertAfterUseSnapshot := activeSnapshotTxnAssert(se, se.GetSessionVars().SnapshotTS, "READ-COMMITTED") require.NoError(t, se.PrepareTxnCtx(context.TODO())) provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() assertAfterUseSnapshot.Check(t) }() @@ -461,7 +523,7 @@ func TestConflictErrorsInRC(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertPessimisticLockErr")) } -func activeRCTxnAssert(t *testing.T, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRCTxnContextProvider] { +func activeRCTxnAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRCTxnContextProvider] { return &txnAssert[*isolation.PessimisticRCTxnContextProvider]{ sctx: sctx, isolation: "READ-COMMITTED", @@ -481,7 +543,7 @@ func inactiveRCTxnAssert(sctx sessionctx.Context) *txnAssert[*isolation.Pessimis } } -func initializePessimisticRCProvider(t *testing.T, tk *testkit.TestKit) *isolation.PessimisticRCTxnContextProvider { +func initializePessimisticRCProvider(t testing.TB, tk *testkit.TestKit) *isolation.PessimisticRCTxnContextProvider { tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") assert := activeRCTxnAssert(t, tk.Session(), true) tk.MustExec("begin pessimistic") diff --git a/sessiontxn/isolation/repeatable_read.go b/sessiontxn/isolation/repeatable_read.go index 571d2754be9a3..f09f9ca415e2a 100644 --- a/sessiontxn/isolation/repeatable_read.go +++ b/sessiontxn/isolation/repeatable_read.go @@ -52,7 +52,7 @@ func NewPessimisticRRTxnContextProvider(sctx sessionctx.Context, causalConsisten txnCtx.IsPessimistic = true txnCtx.Isolation = ast.RepeatableRead }, - onTxnActive: func(txn kv.Transaction) { + onTxnActive: func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) { txn.SetOption(kv.Pessimistic, true) }, }, @@ -70,7 +70,7 @@ func (p *PessimisticRRTxnContextProvider) getForUpdateTs() (ts uint64, err error } var txn kv.Transaction - if txn, err = p.activateTxn(); err != nil { + if txn, err = p.ActivateTxn(); err != nil { return 0, err } @@ -122,8 +122,8 @@ func (p *PessimisticRRTxnContextProvider) updateForUpdateTS() (err error) { } // OnStmtStart is the hook that should be called when a new statement started -func (p *PessimisticRRTxnContextProvider) OnStmtStart(ctx context.Context) error { - if err := p.baseTxnContextProvider.OnStmtStart(ctx); err != nil { +func (p *PessimisticRRTxnContextProvider) OnStmtStart(ctx context.Context, node ast.StmtNode) error { + if err := p.baseTxnContextProvider.OnStmtStart(ctx, node); err != nil { return err } diff --git a/sessiontxn/isolation/repeatable_read_test.go b/sessiontxn/isolation/repeatable_read_test.go index c60c1c3da560d..f8fc70fe8315a 100644 --- a/sessiontxn/isolation/repeatable_read_test.go +++ b/sessiontxn/isolation/repeatable_read_test.go @@ -33,6 +33,7 @@ import ( 
"github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -74,7 +75,7 @@ func TestPessimisticRRErrorHandle(t *testing.T) { nextAction, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.NoError(t, err) require.Equal(t, sessiontxn.StmtActionRetryReady, nextAction) - err = provider.OnStmtStart(context.TODO()) + err = provider.OnStmtStart(context.TODO(), nil) // Unlike StmtRetry which uses forUpdateTS got in OnStmtErrorForNextAction, OnStmtStart will reset provider's forUpdateTS, // which leads GetStmtForUpdateTS to acquire the latest ts. compareTS2 = getOracleTS(t, se) @@ -111,7 +112,7 @@ func TestPessimisticRRErrorHandle(t *testing.T) { nextAction, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.NoError(t, err) require.Equal(t, sessiontxn.StmtActionRetryReady, nextAction) - err = provider.OnStmtStart(context.TODO()) + err = provider.OnStmtStart(context.TODO(), nil) require.NoError(t, err) // Unlike StmtRetry which uses forUpdateTS got in OnStmtErrorForNextAction, OnStmtStart will reset provider's forUpdateTS, // which leads GetStmtForUpdateTS to acquire the latest ts. @@ -153,7 +154,7 @@ func TestRepeatableReadProviderTS(t *testing.T) { compareTS := getOracleTS(t, se) // The read ts should be less than the compareTS require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) CurrentTS, err = provider.GetStmtReadTS() require.NoError(t, err) require.Greater(t, compareTS, CurrentTS) @@ -161,7 +162,7 @@ func TestRepeatableReadProviderTS(t *testing.T) { // The read ts should also be less than the compareTS in a new statement (after calling OnStmtStart) require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) CurrentTS, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, CurrentTS, prevTS) @@ -175,14 +176,14 @@ func TestRepeatableReadProviderTS(t *testing.T) { // The for update read ts should be larger than the compareTS require.NoError(t, executor.ResetContextOfStmt(se, forUpdateStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) forUpdateTS, err := provider.GetStmtForUpdateTS() require.NoError(t, err) require.Greater(t, forUpdateTS, compareTS) // But the read ts is still less than the compareTS require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) CurrentTS, err = provider.GetStmtReadTS() require.NoError(t, err) require.Equal(t, CurrentTS, prevTS) @@ -192,61 +193,66 @@ func TestRepeatableReadProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set @@tx_isolation = 'REPEATABLE-READ'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outside a txn - assert := activePessimisticRRAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = 
activePessimisticRRAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activePessimisticRRAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activePessimisticRRAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactivePessimisticRRAssert(se) - assertAfterActive := activePessimisticRRAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactivePessimisticRRAssert(se) - assertAfterActive = activePessimisticRRAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set @@tx_isolation = 'REPEATABLE-READ'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outside a txn + assert := activePessimisticRRAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activePessimisticRRAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activePessimisticRRAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activePessimisticRRAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then active it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactivePessimisticRRAssert(se) + assertAfterActive := activePessimisticRRAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = inactivePessimisticRRAssert(se) + assertAfterActive = activePessimisticRRAssert(t, se, true) + 
require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInPessimisticRepeatableRead(t *testing.T) { @@ -303,12 +309,12 @@ func TestTidbSnapshotVarInPessimisticRepeatableRead(t *testing.T) { } // information schema and ts should equal to snapshot when tidb_snapshot is set - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() // information schema and ts will restore when set tidb_snapshot to empty tk.MustExec("set @@tidb_snapshot=''") - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseTxn() // txn will not be active after `GetStmtReadTS` or `GetStmtForUpdateTS` when `tidb_snapshot` is set @@ -329,7 +335,7 @@ func TestTidbSnapshotVarInPessimisticRepeatableRead(t *testing.T) { assertAfterUseSnapshot := activeSnapshotTxnAssert(se, se.GetSessionVars().SnapshotTS, "REPEATABLE-READ") require.NoError(t, se.PrepareTxnCtx(context.TODO())) provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() assertAfterUseSnapshot.Check(t) }() @@ -390,11 +396,11 @@ func TestOptimizeWithPlanInPessimisticRR(t *testing.T) { for _, c := range cases { compareTS = getOracleTS(t, se) - require.NoError(t, txnManager.OnStmtStart(context.TODO())) + require.NoError(t, txnManager.OnStmtStart(context.TODO(), nil)) stmt, err = parser.New().ParseOneStmt(c.sql, "", "") require.NoError(t, err) - err = provider.OnStmtStart(context.TODO()) + err = provider.OnStmtStart(context.TODO(), nil) require.NoError(t, err) compiler = executor.Compiler{Ctx: se} @@ -432,9 +438,9 @@ func TestOptimizeWithPlanInPessimisticRR(t *testing.T) { // Test use startTS after optimize when autocommit=0 activeAssert := activePessimisticRRAssert(t, tk.Session(), true) provider = initializeRepeatableReadProvider(t, tk, false) - require.NoError(t, txnManager.OnStmtStart(context.TODO())) stmt, err = parser.New().ParseOneStmt("update t set v = v + 10 where id = 1", "", "") require.NoError(t, err) + require.NoError(t, txnManager.OnStmtStart(context.TODO(), stmt)) execStmt, err = compiler.Compile(context.TODO(), stmt) require.NoError(t, err) err = txnManager.AdviseOptimizeWithPlan(execStmt.Plan) @@ -448,9 +454,9 @@ func TestOptimizeWithPlanInPessimisticRR(t *testing.T) { compareTS = getOracleTS(t, se) activeAssert = activePessimisticRRAssert(t, tk.Session(), true) provider = initializeRepeatableReadProvider(t, tk, false) - require.NoError(t, txnManager.OnStmtStart(context.TODO())) stmt, err = parser.New().ParseOneStmt("select * from t", "", "") require.NoError(t, err) + require.NoError(t, txnManager.OnStmtStart(context.TODO(), stmt)) execStmt, err = compiler.Compile(context.TODO(), stmt) require.NoError(t, err) err = txnManager.AdviseOptimizeWithPlan(execStmt.Plan) @@ -632,7 +638,7 @@ func TestConflictErrorInOtherQueryContainingPointGet(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertPessimisticLockErr")) } -func activePessimisticRRAssert(t *testing.T, sctx sessionctx.Context, +func activePessimisticRRAssert(t testing.TB, sctx 
sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRRTxnContextProvider] { return &txnAssert[*isolation.PessimisticRRTxnContextProvider]{ sctx: sctx, diff --git a/sessiontxn/isolation/serializable.go b/sessiontxn/isolation/serializable.go index a8256bb2294ac..75f4db61a8133 100644 --- a/sessiontxn/isolation/serializable.go +++ b/sessiontxn/isolation/serializable.go @@ -38,7 +38,7 @@ func NewPessimisticSerializableTxnContextProvider(sctx sessionctx.Context, txnCtx.IsPessimistic = true txnCtx.Isolation = ast.Serializable }, - onTxnActive: func(txn kv.Transaction) { + onTxnActive: func(txn kv.Transaction, _ sessiontxn.EnterNewTxnType) { txn.SetOption(kv.Pessimistic, true) }, }, diff --git a/sessiontxn/isolation/serializable_test.go b/sessiontxn/isolation/serializable_test.go index a28e455195cbf..90034e0934278 100644 --- a/sessiontxn/isolation/serializable_test.go +++ b/sessiontxn/isolation/serializable_test.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -54,7 +55,7 @@ func TestPessimisticSerializableTxnProviderTS(t *testing.T) { compareTS := getOracleTS(t, se) require.NoError(t, executor.ResetContextOfStmt(se, readOnlyStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) ts, err := provider.GetStmtReadTS() require.NoError(t, err) require.Greater(t, compareTS, ts) @@ -62,7 +63,7 @@ func TestPessimisticSerializableTxnProviderTS(t *testing.T) { // In Oracle-like serializable isolation, readTS equals to the for update ts require.NoError(t, executor.ResetContextOfStmt(se, forUpdateStmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) ts, err = provider.GetStmtForUpdateTS() require.NoError(t, err) require.Greater(t, compareTS, ts) @@ -87,7 +88,7 @@ func TestPessimisticSerializableTxnContextProviderLockError(t *testing.T) { &tikverr.ErrDeadlock{Deadlock: &kvrpcpb.Deadlock{}, IsRetryable: true}, } { require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) nextAction, err := provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.Same(t, lockErr, err) require.Equal(t, sessiontxn.StmtActionError, nextAction) @@ -99,7 +100,7 @@ func TestPessimisticSerializableTxnContextProviderLockError(t *testing.T) { errors.New("err"), } { require.NoError(t, executor.ResetContextOfStmt(se, stmt)) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) nextAction, err := provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) require.Same(t, lockErr, err) require.Equal(t, sessiontxn.StmtActionError, nextAction) @@ -110,62 +111,67 @@ func TestSerializableInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set tidb_skip_isolation_level_check = 1") - tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outsize a txn - assert := activeSerializableAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - 
- // begin outsize a txn - assert = activeSerializableAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeSerializableAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeSerializableAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactiveSerializableAssert(se) - assertAfterActive := activeSerializableAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactiveSerializableAssert(se) - assertAfterActive = activeSerializableAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set tidb_skip_isolation_level_check = 1") + tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outside a txn + assert := activeSerializableAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin outside a txn + assert = activeSerializableAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeSerializableAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not an explicit one + assert = activeSerializableAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then activate it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactiveSerializableAssert(se) + assertAfterActive := activeSerializableAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = 
inactiveSerializableAssert(se) + assertAfterActive = activeSerializableAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInSerialize(t *testing.T) { @@ -223,12 +229,12 @@ func TestTidbSnapshotVarInSerialize(t *testing.T) { } // information schema and ts should equal to snapshot when tidb_snapshot is set - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() // information schema and ts will restore when set tidb_snapshot to empty tk.MustExec("set @@tidb_snapshot=''") - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseTxn() // txn will not be active after `GetStmtReadTS` or `GetStmtForUpdateTS` when `tidb_snapshot` is set @@ -250,14 +256,14 @@ func TestTidbSnapshotVarInSerialize(t *testing.T) { assertAfterUseSnapshot := activeSnapshotTxnAssert(se, se.GetSessionVars().SnapshotTS, "SERIALIZABLE") require.NoError(t, se.PrepareTxnCtx(context.TODO())) provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO())) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) checkUseSnapshot() assertAfterUseSnapshot.Check(t) }() } } -func activeSerializableAssert(t *testing.T, sctx sessionctx.Context, +func activeSerializableAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticSerializableTxnContextProvider] { return &txnAssert[*isolation.PessimisticSerializableTxnContextProvider]{ sctx: sctx, diff --git a/sessiontxn/legacy/BUILD.bazel b/sessiontxn/legacy/BUILD.bazel deleted file mode 100644 index d9e9b5249cbbf..0000000000000 --- a/sessiontxn/legacy/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "legacy", - srcs = ["provider.go"], - importpath = "github.com/pingcap/tidb/sessiontxn/legacy", - visibility = ["//visibility:public"], - deps = [ - "//domain", - "//infoschema", - "//kv", - "//parser/ast", - "//parser/terror", - "//sessionctx", - "//sessionctx/variable", - "//sessiontxn", - "//sessiontxn/staleread", - "//table/temptable", - "//util/logutil", - "@com_github_pingcap_errors//:errors", - "@com_github_tikv_client_go_v2//error", - "@org_uber_go_zap//:zap", - ], -) - -go_test( - name = "legacy_test", - srcs = ["provider_test.go"], - deps = [ - ":legacy", - "//domain", - "//kv", - "//sessionctx", - "//sessiontxn", - "//testkit", - "@com_github_pingcap_errors//:errors", - "@com_github_pingcap_kvproto//pkg/kvrpcpb", - "@com_github_stretchr_testify//require", - "@com_github_tikv_client_go_v2//error", - ], -) diff --git a/sessiontxn/legacy/provider.go b/sessiontxn/legacy/provider.go deleted file mode 100644 index a571c612f24aa..0000000000000 --- a/sessiontxn/legacy/provider.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package legacy - -import ( - "context" - "time" - - "github.com/pingcap/errors" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/infoschema" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/terror" - "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/staleread" - "github.com/pingcap/tidb/table/temptable" - "github.com/pingcap/tidb/util/logutil" - tikverr "github.com/tikv/client-go/v2/error" - "go.uber.org/zap" -) - -// SimpleTxnContextProvider implements TxnContextProvider -// It is only used in refactor stage -// TODO: remove it after refactor finished -type SimpleTxnContextProvider struct { - Ctx context.Context - Sctx sessionctx.Context - InfoSchema infoschema.InfoSchema - GetReadTSFunc func() (uint64, error) - GetForUpdateTSFunc func() (uint64, error) - UpdateForUpdateTS func(seCtx sessionctx.Context, newForUpdateTS uint64) error - - Pessimistic bool - CausalConsistencyOnly bool - - isTxnActive bool -} - -// GetTxnInfoSchema returns the information schema used by txn -func (p *SimpleTxnContextProvider) GetTxnInfoSchema() infoschema.InfoSchema { - return p.InfoSchema -} - -// GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) -func (p *SimpleTxnContextProvider) GetStmtReadTS() (uint64, error) { - if p.GetReadTSFunc == nil { - return 0, errors.New("ReadTSFunc not set") - } - return p.GetReadTSFunc() -} - -// GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... for update -func (p *SimpleTxnContextProvider) GetStmtForUpdateTS() (uint64, error) { - if p.GetForUpdateTSFunc == nil { - return 0, errors.New("GetForUpdateTSFunc not set") - } - return p.GetForUpdateTSFunc() -} - -// OnInitialize is the hook that should be called when enter a new txn with this provider -func (p *SimpleTxnContextProvider) OnInitialize(ctx context.Context, tp sessiontxn.EnterNewTxnType) error { - p.Ctx = ctx - sessVars := p.Sctx.GetSessionVars() - switch tp { - case sessiontxn.EnterNewTxnDefault, sessiontxn.EnterNewTxnWithBeginStmt: - shouldReuseTxn := tp == sessiontxn.EnterNewTxnWithBeginStmt && sessiontxn.CanReuseTxnWhenExplicitBegin(p.Sctx) - if !shouldReuseTxn { - if err := p.Sctx.NewTxn(ctx); err != nil { - return err - } - } - - if tp == sessiontxn.EnterNewTxnWithBeginStmt { - // With START TRANSACTION, autocommit remains disabled until you end - // the transaction with COMMIT or ROLLBACK. The autocommit mode then - // reverts to its previous state. 
- sessVars.SetInTxn(true) - } - - sessVars.TxnCtx.IsPessimistic = p.Pessimistic - if _, err := p.activateTxn(); err != nil { - return err - } - - if is, ok := sessVars.TxnCtx.InfoSchema.(infoschema.InfoSchema); ok { - p.InfoSchema = is - } - case sessiontxn.EnterNewTxnBeforeStmt: - p.InfoSchema = temptable.AttachLocalTemporaryTableInfoSchema(p.Sctx, domain.GetDomain(p.Sctx).InfoSchema()) - sessVars.TxnCtx = &variable.TransactionContext{ - TxnCtxNoNeedToRestore: variable.TxnCtxNoNeedToRestore{ - InfoSchema: p.InfoSchema, - CreateTime: time.Now(), - ShardStep: int(sessVars.ShardAllocateStep), - TxnScope: sessVars.CheckAndGetTxnScope(), - IsPessimistic: p.Pessimistic, - }, - } - default: - return errors.Errorf("Unsupported type: %v", tp) - } - - return nil -} - -// OnStmtStart is the hook that should be called when a new statement started -func (p *SimpleTxnContextProvider) OnStmtStart(ctx context.Context) error { - p.Ctx = ctx - p.InfoSchema = p.Sctx.GetInfoSchema().(infoschema.InfoSchema) - return nil -} - -// OnStmtErrorForNextAction is the hook that should be called when a new statement get an error -func (p *SimpleTxnContextProvider) OnStmtErrorForNextAction(point sessiontxn.StmtErrorHandlePoint, err error) (sessiontxn.StmtErrorAction, error) { - switch point { - case sessiontxn.StmtErrAfterPessimisticLock: - return p.handleAfterPessimisticLockError(err) - default: - return sessiontxn.NoIdea() - } -} - -func (p *SimpleTxnContextProvider) handleAfterPessimisticLockError(lockErr error) (sessiontxn.StmtErrorAction, error) { - sessVars := p.Sctx.GetSessionVars() - if sessVars.IsIsolation(ast.Serializable) { - return sessiontxn.ErrorAction(lockErr) - } - - txnCtx := sessVars.TxnCtx - if deadlock, ok := errors.Cause(lockErr).(*tikverr.ErrDeadlock); ok { - if !deadlock.IsRetryable { - return sessiontxn.ErrorAction(lockErr) - } - logutil.Logger(p.Ctx).Info("single statement deadlock, retry statement", - zap.Uint64("txn", txnCtx.StartTS), - zap.Uint64("lockTS", deadlock.LockTs), - zap.Stringer("lockKey", kv.Key(deadlock.LockKey)), - zap.Uint64("deadlockKeyHash", deadlock.DeadlockKeyHash)) - } else if terror.ErrorEqual(kv.ErrWriteConflict, lockErr) { - errStr := lockErr.Error() - forUpdateTS := txnCtx.GetForUpdateTS() - logutil.Logger(p.Ctx).Debug("pessimistic write conflict, retry statement", - zap.Uint64("txn", txnCtx.StartTS), - zap.Uint64("forUpdateTS", forUpdateTS), - zap.String("err", errStr)) - // Always update forUpdateTS by getting a new timestamp from PD. - // If we use the conflict commitTS as the new forUpdateTS and async commit - // is used, the commitTS of this transaction may exceed the max timestamp - // that PD allocates. Then, the change may be invisible to a new transaction, - // which means linearizability is broken. - } else { - // this branch if err not nil, always update forUpdateTS to avoid problem described below - // for nowait, when ErrLock happened, ErrLockAcquireFailAndNoWaitSet will be returned, and in the same txn - // the select for updateTs must be updated, otherwise there maybe rollback problem. 
- // begin; select for update key1(here ErrLocked or other errors(or max_execution_time like util), - // key1 lock not get and async rollback key1 is raised) - // select for update key1 again(this time lock succ(maybe lock released by others)) - // the async rollback operation rollbacked the lock just acquired - tsErr := p.UpdateForUpdateTS(p.Sctx, 0) - if tsErr != nil { - logutil.Logger(p.Ctx).Warn("UpdateForUpdateTS failed", zap.Error(tsErr)) - } - return sessiontxn.ErrorAction(lockErr) - } - - if err := p.UpdateForUpdateTS(p.Sctx, 0); err != nil { - return sessiontxn.ErrorAction(lockErr) - } - - return sessiontxn.RetryReady() -} - -// OnStmtRetry is the hook that should be called when a statement retry -func (p *SimpleTxnContextProvider) OnStmtRetry(_ context.Context) error { - return nil -} - -func (p *SimpleTxnContextProvider) prepareTSFuture() error { - if p.Sctx.GetSessionVars().SnapshotTS != 0 || staleread.IsStmtStaleness(p.Sctx) || p.Sctx.GetPreparedTSFuture() != nil { - return nil - } - - txn, err := p.Sctx.Txn(false) - if err != nil { - return err - } - - if txn.Valid() { - return nil - } - - txnScope := p.Sctx.GetSessionVars().CheckAndGetTxnScope() - future := sessiontxn.NewOracleFuture(p.Ctx, p.Sctx, txnScope) - return p.Sctx.PrepareTSFuture(p.Ctx, future, txnScope) -} - -// activateTxn actives the txn -func (p *SimpleTxnContextProvider) activateTxn() (kv.Transaction, error) { - if p.isTxnActive { - return p.Sctx.Txn(true) - } - - txn, err := p.Sctx.Txn(true) - if err != nil { - return nil, err - } - - if p.Pessimistic { - txn.SetOption(kv.Pessimistic, true) - } - - if p.CausalConsistencyOnly { - txn.SetOption(kv.GuaranteeLinearizability, false) - } - - p.isTxnActive = true - return txn, nil -} - -// AdviseWarmup provides warmup for inner state -func (p *SimpleTxnContextProvider) AdviseWarmup() error { - return p.prepareTSFuture() -} - -// AdviseOptimizeWithPlan providers optimization according to the plan -func (p *SimpleTxnContextProvider) AdviseOptimizeWithPlan(_ interface{}) error { - return nil -} diff --git a/sessiontxn/legacy/provider_test.go b/sessiontxn/legacy/provider_test.go deleted file mode 100644 index 22aa042632182..0000000000000 --- a/sessiontxn/legacy/provider_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package legacy_test - -import ( - "context" - "testing" - - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/legacy" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" - tikverr "github.com/tikv/client-go/v2/error" -) - -func TestErrorHandle(t *testing.T) { - store, do, clean := testkit.CreateMockStoreAndDomain(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - - provider := newSimpleProvider(tk, do) - require.NoError(t, provider.OnStmtStart(context.TODO())) - expectedForUpdateTS := getForUpdateTS(t, provider) - - var lockErr error - - // StmtErrAfterLock: ErrWriteConflict should retry and update forUpdateTS - lockErr = kv.ErrWriteConflict - action, err := provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) - require.Equal(t, sessiontxn.StmtActionRetryReady, action) - require.Nil(t, err) - expectedForUpdateTS += 1 - require.Equal(t, expectedForUpdateTS, getForUpdateTS(t, provider)) - - // StmtErrAfterLock: DeadLock that is not retryable will just return an error - lockErr = newDeadLockError(false) - action, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) - require.Equal(t, sessiontxn.StmtActionError, action) - require.Equal(t, lockErr, err) - require.Equal(t, expectedForUpdateTS, getForUpdateTS(t, provider)) - - // StmtErrAfterLock: DeadLock that is retryable should retry and update forUpdateTS - lockErr = newDeadLockError(true) - action, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) - require.Equal(t, sessiontxn.StmtActionRetryReady, action) - require.Nil(t, err) - expectedForUpdateTS += 1 - require.Equal(t, expectedForUpdateTS, getForUpdateTS(t, provider)) - - // StmtErrAfterLock: other errors should only update forUpdateTS but not retry - lockErr = errors.New("other error") - action, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterPessimisticLock, lockErr) - require.Equal(t, sessiontxn.StmtActionError, action) - require.Equal(t, lockErr, err) - expectedForUpdateTS += 1 - require.Equal(t, expectedForUpdateTS, getForUpdateTS(t, provider)) - - // StmtErrAfterQuery: always not retry - lockErr = kv.ErrWriteConflict - action, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterQuery, lockErr) - require.Equal(t, sessiontxn.StmtActionNoIdea, action) - require.Nil(t, err) - - tk.Session().GetSessionVars().StmtCtx.RCCheckTS = true - require.NoError(t, provider.OnStmtStart(context.TODO())) - action, err = provider.OnStmtErrorForNextAction(sessiontxn.StmtErrAfterQuery, lockErr) - require.Equal(t, sessiontxn.StmtActionNoIdea, action) - require.Nil(t, err) -} - -func getForUpdateTS(t *testing.T, provider *legacy.SimpleTxnContextProvider) uint64 { - forUpdateTS, err := provider.GetStmtForUpdateTS() - require.NoError(t, err) - return forUpdateTS -} - -func newDeadLockError(isRetryable bool) error { - return &tikverr.ErrDeadlock{ - Deadlock: &kvrpcpb.Deadlock{}, - IsRetryable: isRetryable, - } -} - -func newSimpleProvider(tk *testkit.TestKit, do *domain.Domain) *legacy.SimpleTxnContextProvider { - tk.MustExec("begin pessimistic") - readTS := uint64(1) - forUpdateTS := uint64(1) - return &legacy.SimpleTxnContextProvider{ - Ctx: context.TODO(), - Sctx: tk.Session(), - InfoSchema: do.InfoSchema(), - GetReadTSFunc: func() (uint64, 
error) { - return readTS, nil - }, - GetForUpdateTSFunc: func() (uint64, error) { - return forUpdateTS, nil - }, - UpdateForUpdateTS: func(seCtx sessionctx.Context, newForUpdateTS uint64) error { - if newForUpdateTS == 0 { - forUpdateTS += 1 - } else { - forUpdateTS = newForUpdateTS - } - return nil - }, - Pessimistic: true, - } -} diff --git a/sessiontxn/staleread/BUILD.bazel b/sessiontxn/staleread/BUILD.bazel index 727ee725341ec..d6272550153af 100644 --- a/sessiontxn/staleread/BUILD.bazel +++ b/sessiontxn/staleread/BUILD.bazel @@ -12,10 +12,12 @@ go_library( importpath = "github.com/pingcap/tidb/sessiontxn/staleread", visibility = ["//visibility:public"], deps = [ + "//config", "//domain", "//errno", "//expression", "//infoschema", + "//kv", "//parser/ast", "//parser/mysql", "//sessionctx", @@ -31,20 +33,28 @@ go_library( go_test( name = "staleread_test", - srcs = ["processor_test.go"], + srcs = [ + "main_test.go", + "processor_test.go", + "provider_test.go", + ], deps = [ ":staleread", "//domain", "//infoschema", + "//kv", "//parser", "//parser/ast", "//sessionctx", + "//sessiontxn", "//table/temptable", "//testkit", "//testkit/testsetup", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_goleak//:goleak", ], ) diff --git a/sessiontxn/staleread/main_test.go b/sessiontxn/staleread/main_test.go new file mode 100644 index 0000000000000..a60b5a95c9007 --- /dev/null +++ b/sessiontxn/staleread/main_test.go @@ -0,0 +1,33 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package staleread_test + +import ( + "testing" + + "github.com/pingcap/tidb/testkit/testsetup" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + testsetup.SetupForCommonTest() + tikv.EnableFailpoints() + opts := []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + } + goleak.VerifyTestMain(m, opts...) 
+} diff --git a/sessiontxn/staleread/processor_test.go b/sessiontxn/staleread/processor_test.go index 0e399d3b78760..111b9088fb364 100644 --- a/sessiontxn/staleread/processor_test.go +++ b/sessiontxn/staleread/processor_test.go @@ -28,21 +28,10 @@ import ( "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/testkit/testsetup" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" - "go.uber.org/goleak" ) -func TestMain(m *testing.M) { - opts := []goleak.Option{ - goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - } - testsetup.SetupForCommonTest() - goleak.VerifyTestMain(m, opts...) -} - type staleReadPoint struct { tk *testkit.TestKit ts uint64 diff --git a/sessiontxn/staleread/provider.go b/sessiontxn/staleread/provider.go index cc77cdd214b37..417154b7ea420 100644 --- a/sessiontxn/staleread/provider.go +++ b/sessiontxn/staleread/provider.go @@ -18,18 +18,22 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/table/temptable" ) // StalenessTxnContextProvider implements sessiontxn.TxnContextProvider type StalenessTxnContextProvider struct { + ctx context.Context sctx sessionctx.Context is infoschema.InfoSchema ts uint64 + txn kv.Transaction } // NewStalenessTxnContextProvider creates a new StalenessTxnContextProvider @@ -46,6 +50,16 @@ func (p *StalenessTxnContextProvider) GetTxnInfoSchema() infoschema.InfoSchema { return p.is } +// GetTxnScope returns the current txn scope +func (p *StalenessTxnContextProvider) GetTxnScope() string { + return p.sctx.GetSessionVars().TxnCtx.TxnScope +} + +// GetReadReplicaScope returns the read replica scope +func (p *StalenessTxnContextProvider) GetReadReplicaScope() string { + return config.GetTxnScopeFromConfig() +} + // GetStmtReadTS returns the read timestamp func (p *StalenessTxnContextProvider) GetStmtReadTS() (uint64, error) { return p.ts, nil @@ -58,26 +72,25 @@ func (p *StalenessTxnContextProvider) GetStmtForUpdateTS() (uint64, error) { // OnInitialize is the hook that should be called when enter a new txn with this provider func (p *StalenessTxnContextProvider) OnInitialize(ctx context.Context, tp sessiontxn.EnterNewTxnType) error { + p.ctx = ctx switch tp { case sessiontxn.EnterNewTxnDefault, sessiontxn.EnterNewTxnWithBeginStmt: - if err := p.sctx.NewStaleTxnWithStartTS(ctx, p.ts); err != nil { - return err - } - p.is = p.sctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema) - if err := p.sctx.GetSessionVars().SetSystemVar(variable.TiDBSnapshot, ""); err != nil { - return err - } + return p.activateStaleTxn() case sessiontxn.EnterNewTxnWithReplaceProvider: - if p.is == nil { - is, err := GetSessionSnapshotInfoSchema(p.sctx, p.ts) - if err != nil { - return err - } - p.is = temptable.AttachLocalTemporaryTableInfoSchema(p.sctx, is) - } + return p.enterNewStaleTxnWithReplaceProvider() default: return errors.Errorf("Unsupported type: %v", tp) } +} + +func (p *StalenessTxnContextProvider) activateStaleTxn() error { + if err := p.sctx.NewStaleTxnWithStartTS(p.ctx, p.ts); err != nil { + return err + } + p.is = 
p.sctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema) + if err := p.sctx.GetSessionVars().SetSystemVar(variable.TiDBSnapshot, ""); err != nil { + return err + } txnCtx := p.sctx.GetSessionVars().TxnCtx txnCtx.IsStaleness = true @@ -85,18 +98,57 @@ func (p *StalenessTxnContextProvider) OnInitialize(ctx context.Context, tp sessi return nil } -// OnStmtStart is the hook that should be called when a new statement started -func (p *StalenessTxnContextProvider) OnStmtStart(_ context.Context) error { +func (p *StalenessTxnContextProvider) enterNewStaleTxnWithReplaceProvider() error { + if p.is == nil { + is, err := GetSessionSnapshotInfoSchema(p.sctx, p.ts) + if err != nil { + return err + } + p.is = is + } + + txnCtx := p.sctx.GetSessionVars().TxnCtx + txnCtx.TxnScope = kv.GlobalTxnScope + txnCtx.IsStaleness = true + txnCtx.InfoSchema = p.is return nil } +// OnStmtStart is the hook that should be called when a new statement started +func (p *StalenessTxnContextProvider) OnStmtStart(ctx context.Context, _ ast.StmtNode) error { + p.ctx = ctx + return nil +} + +// ActivateTxn activates the transaction. +func (p *StalenessTxnContextProvider) ActivateTxn() (kv.Transaction, error) { + if p.txn != nil { + return p.txn, nil + } + + err := p.activateStaleTxn() + if err != nil { + return nil, err + } + + txn, err := p.sctx.Txn(false) + if err != nil { + return nil, err + } + + p.txn = txn + + return p.txn, nil +} + // OnStmtErrorForNextAction is the hook that should be called when a new statement get an error func (p *StalenessTxnContextProvider) OnStmtErrorForNextAction(_ sessiontxn.StmtErrorHandlePoint, _ error) (sessiontxn.StmtErrorAction, error) { return sessiontxn.NoIdea() } // OnStmtRetry is the hook that should be called when a statement retry -func (p *StalenessTxnContextProvider) OnStmtRetry(_ context.Context) error { +func (p *StalenessTxnContextProvider) OnStmtRetry(ctx context.Context) error { + p.ctx = ctx return nil } @@ -109,3 +161,32 @@ func (p *StalenessTxnContextProvider) AdviseWarmup() error { func (p *StalenessTxnContextProvider) AdviseOptimizeWithPlan(_ interface{}) error { return nil } + +// GetSnapshotWithStmtReadTS gets snapshot with read ts and sets the transaction related options +// before returning +func (p *StalenessTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { + txn, err := p.sctx.Txn(false) + if err != nil { + return nil, err + } + + if txn.Valid() { + return txn.GetSnapshot(), nil + } + + sessVars := p.sctx.GetSessionVars() + snapshot := sessiontxn.GetSnapshotWithTS(p.sctx, p.ts) + + replicaReadType := sessVars.GetReplicaRead() + if replicaReadType.IsFollowerRead() { + snapshot.SetOption(kv.ReplicaRead, replicaReadType) + } + snapshot.SetOption(kv.IsStalenessReadOnly, true) + + return snapshot, nil +} + +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts +func (p *StalenessTxnContextProvider) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { + return nil, errors.New("GetSnapshotWithStmtForUpdateTS not supported for stalenessTxnProvider") +} diff --git a/sessiontxn/staleread/provider_test.go b/sessiontxn/staleread/provider_test.go new file mode 100644 index 0000000000000..0b50f06f41746 --- /dev/null +++ b/sessiontxn/staleread/provider_test.go @@ -0,0 +1,116 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package staleread_test + +import ( + "context" + "fmt" + "testing" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/sessiontxn/staleread" + "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" +) + +func TestStaleReadTxnScope(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + + checkProviderTxnScope := func() { + provider := createStaleReadProvider(t, tk, false) + require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope()) + + provider = createStaleReadProvider(t, tk, true) + require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope()) + + tk.MustExec("rollback") + } + + checkProviderTxnScope() + + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj"))) + defer func() { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + }() + + checkProviderTxnScope() + + tk.MustExec("set @@global.tidb_enable_local_txn=1") + tk.MustExec("rollback") + tk = testkit.NewTestKit(t, store) + checkProviderTxnScope() +} + +func TestStaleReadReplicaReadScope(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + + checkProviderReplicaReadScope := func(scope string) { + provider := createStaleReadProvider(t, tk, false) + require.Equal(t, scope, provider.GetReadReplicaScope()) + + provider = createStaleReadProvider(t, tk, true) + require.Equal(t, scope, provider.GetReadReplicaScope()) + + tk.MustExec("rollback") + } + + checkProviderReplicaReadScope(kv.GlobalReplicaScope) + + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj"))) + defer func() { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + }() + + checkProviderReplicaReadScope("bj") +} + +func createStaleReadProvider(t *testing.T, tk *testkit.TestKit, explicitTxn bool) *staleread.StalenessTxnContextProvider { + tk.MustExec("rollback") + require.NoError(t, tk.Session().PrepareTxnCtx(context.TODO())) + se := tk.Session() + ts := getOracleTS(t, se) + if explicitTxn { + err := sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithBeginStmt, + StaleReadTS: ts, + }) + require.NoError(t, err) + } else { + is, err := domain.GetDomain(se).GetSnapshotInfoSchema(ts) + require.NoError(t, err) + err = sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithReplaceProvider, + Provider: staleread.NewStalenessTxnContextProvider(se, ts, is), + }) + require.NoError(t, err) + } + return sessiontxn.GetTxnManager(se).GetContextProvider().(*staleread.StalenessTxnContextProvider) +} + +func getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 { + ts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), 
&oracle.Option{TxnScope: oracle.GlobalTxnScope}) + require.NoError(t, err) + return ts +} diff --git a/sessiontxn/txn.go b/sessiontxn/txn.go index 6627a31e0313e..b986d1048ca5f 100644 --- a/sessiontxn/txn.go +++ b/sessiontxn/txn.go @@ -18,7 +18,11 @@ import ( "context" "github.com/opentracing/opentracing-go" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/table/temptable" "github.com/tikv/client-go/v2/oracle" ) @@ -67,3 +71,29 @@ func CanReuseTxnWhenExplicitBegin(sctx sessionctx.Context) bool { // initialized with snapshot ts. return txnCtx.History == nil && !txnCtx.IsStaleness && sessVars.SnapshotTS == 0 } + +// GetSnapshotWithTS returns a snapshot with ts. +func GetSnapshotWithTS(s sessionctx.Context, ts uint64) kv.Snapshot { + snap := s.GetStore().GetSnapshot(kv.Version{Ver: ts}) + snap.SetOption(kv.SnapInterceptor, temptable.SessionSnapshotInterceptor(s)) + if s.GetSessionVars().InRestrictedSQL { + snap.SetOption(kv.RequestSourceInternal, true) + } + if tp := s.GetSessionVars().RequestSourceType; tp != "" { + snap.SetOption(kv.RequestSourceType, tp) + } + return snap +} + +// SetTxnAssertionLevel sets assertion level of a transaction. Note that assertion level should be set only once just +// after creating a new transaction. +func SetTxnAssertionLevel(txn kv.Transaction, assertionLevel variable.AssertionLevel) { + switch assertionLevel { + case variable.AssertionLevelOff: + txn.SetOption(kv.AssertionLevel, kvrpcpb.AssertionLevel_Off) + case variable.AssertionLevelFast: + txn.SetOption(kv.AssertionLevel, kvrpcpb.AssertionLevel_Fast) + case variable.AssertionLevelStrict: + txn.SetOption(kv.AssertionLevel, kvrpcpb.AssertionLevel_Strict) + } +} diff --git a/sessiontxn/txn_context_test.go b/sessiontxn/txn_context_test.go index 9fec1844606a3..75a3b72f1ac38 100644 --- a/sessiontxn/txn_context_test.go +++ b/sessiontxn/txn_context_test.go @@ -51,13 +51,10 @@ func setupTxnContextTest(t *testing.T) (kv.Storage, *domain.Domain, func()) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertTxnManagerAfterBuildExecutor", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertTxnManagerAfterPessimisticLockErrorRetry", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertTxnManagerInShortPointGetPlan", "return")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertStaleReadValuesSameWithExecuteAndBuilder", "return")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/assertNotStaleReadForExecutorGetReadTS", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/session/assertTxnManagerInRunStmt", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/session/assertTxnManagerInPreparedStmtExec", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/session/assertTxnManagerInCachedPlanExec", "return")) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/session/assertTxnManagerForUpdateTSEqual", "return")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/planner/core/assertStaleReadForOptimizePreparedPlan", "return")) store, do, clean := testkit.CreateMockStoreAndDomain(t) @@ -82,13 +79,10 @@ func setupTxnContextTest(t *testing.T) (kv.Storage, *domain.Domain, func()) { require.NoError(t, 
failpoint.Disable("github.com/pingcap/tidb/executor/assertTxnManagerAfterBuildExecutor")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertTxnManagerAfterPessimisticLockErrorRetry")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertTxnManagerInShortPointGetPlan")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleReadValuesSameWithExecuteAndBuilder")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertNotStaleReadForExecutorGetReadTS")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/session/assertTxnManagerInRunStmt")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/session/assertTxnManagerInPreparedStmtExec")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/session/assertTxnManagerInCachedPlanExec")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/session/assertTxnManagerForUpdateTSEqual")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/planner/core/assertStaleReadForOptimizePreparedPlan")) tk.Session().SetValue(sessiontxn.AssertRecordsKey, nil) tk.Session().SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) @@ -593,13 +587,13 @@ func TestTxnContextForPrepareExecute(t *testing.T) { } func TestTxnContextForStaleReadInPrepare(t *testing.T) { - store, do, deferFunc := setupTxnContextTest(t) + store, _, deferFunc := setupTxnContextTest(t) defer deferFunc() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") se := tk.Session() - is1 := do.InfoSchema() + is1 := se.GetDomainInfoSchema() tk.MustExec("do sleep(0.1)") tk.MustExec("set @a=now(6)") tk.MustExec("prepare s1 from 'select * from t1 where id=1'") @@ -666,6 +660,32 @@ func TestTxnContextForStaleReadInPrepare(t *testing.T) { doWithCheckPath(t, se, normalPathRecords, func() { tk.MustExec("execute s3") }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + + // stale read should not use plan cache + is2 := se.GetDomainInfoSchema() + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=''") + tk.MustExec("do sleep(0.1)") + tk.MustExec("set @b=now(6)") + tk.MustExec("do sleep(0.1)") + tk.MustExec("update t1 set v=v+1 where id=1") + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, is2) + doWithCheckPath(t, se, path, func() { + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID1, nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 12")) + }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=@b") + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, is2) + doWithCheckPath(t, se, path, func() { + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID1, nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 11")) + }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=''") } func TestTxnContextPreparedStmtWithForUpdate(t *testing.T) { @@ -782,3 +802,83 @@ func TestStillWriteConflictAfterRetry(t *testing.T) { } }) } + +func TestOptimisticTxnRetryInPessimisticMode(t *testing.T) { + store, _, deferFunc := setupTxnContextTest(t) + defer deferFunc() + + queries := []string{ + "update t1 set v=v+1", + "update t1 set v=v+1 where id=1", + "update t1 set v=v+1 where id=1 and v>0", + "update t1 set v=v+1 where id in (1, 2, 3)", + "update t1 set v=v+1 where id in (1, 2, 3) and v>0", + } + + testfork.RunTest(t, func(t *testfork.T) { + tk := testkit.NewTestKit(t, 
store) + tk.MustExec("use test") + tk.MustExec("truncate table t1") + tk.MustExec("insert into t1 values(1, 10)") + tk2 := testkit.NewSteppedTestKit(t, store) + defer tk2.MustExec("rollback") + + tk2.MustExec("use test") + tk2.MustExec("set @@tidb_txn_mode = 'pessimistic'") + tk2.MustExec("set autocommit = 1") + + // When autocommit meets write conflict, it will retry in pessimistic mode. + // conflictAfterTransfer being true means we encounter a write-conflict again during + // the pessimistic mode. + // doubleConflictAfterTransfer being true means we encounter a write-conflict again + // during the pessimistic retry phase. + // And only conflictAfterTransfer being true allows doubleConflictAfterTransfer being true. + conflictAfterTransfer := testfork.PickEnum(t, true, false) + doubleConflictAfterTransfer := testfork.PickEnum(t, true, false) + if !conflictAfterTransfer && doubleConflictAfterTransfer { + return + } + + tk2.SetBreakPoints( + sessiontxn.BreakPointBeforeExecutorFirstRun, + sessiontxn.BreakPointOnStmtRetryAfterLockError, + ) + + query := testfork.Pick(t, queries) + + tk2.SteppedMustExec(query) + + // Pause the session before the executor first run and then update the record in another session + tk2.ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + // After this update, tk2's statement will encounter write conflict. As it's an autocommit transaction, + // it will transfer to pessimistic transaction mode. + tk.MustExec("update t1 set v=v+1") + + if conflictAfterTransfer { + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + tk.MustExec("update t1 set v=v+1") + + if doubleConflictAfterTransfer { + // Session continues, it should get a lock error and retry, we pause the session before the executor's next run + // and then update the record in another session again. + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointOnStmtRetryAfterLockError) + tk.MustExec("update t1 set v=v+1") + } + + // Because the record is updated by another session again, when this session continues, it will get a lock error again. 
+ tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointOnStmtRetryAfterLockError) + tk2.Continue().ExpectIdle() + + if doubleConflictAfterTransfer { + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 14")) + } else { + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 13")) + } + } else { + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + tk2.Continue().ExpectIdle() + + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 12")) + } + }) +} diff --git a/sessiontxn/txn_manager_test.go b/sessiontxn/txn_manager_test.go index e32f8bc2b3784..983513fa44d03 100644 --- a/sessiontxn/txn_manager_test.go +++ b/sessiontxn/txn_manager_test.go @@ -15,6 +15,7 @@ package sessiontxn_test import ( + "bytes" "context" "testing" @@ -70,6 +71,27 @@ func TestEnterNewTxn(t *testing.T) { require.True(t, txn.GetOption(kv.GuaranteeLinearizability).(bool)) }, }, + { + name: "EnterNewTxnDefault", + request: &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + }, + prepare: func(t *testing.T) { + tk.MustExec("set @@autocommit=0") + }, + check: func(t *testing.T, sctx sessionctx.Context) { + txn := checkBasicActiveTxn(t, sctx) + checkInfoSchemaVersion(t, sctx, domain.GetDomain(sctx).InfoSchema().SchemaMetaVersion()) + + sessVars := sctx.GetSessionVars() + require.False(t, txn.IsPessimistic()) + require.False(t, sessVars.InTxn()) + require.False(t, sessVars.TxnCtx.IsStaleness) + + require.False(t, sctx.GetSessionVars().TxnCtx.CouldRetry) + require.True(t, txn.GetOption(kv.GuaranteeLinearizability).(bool)) + }, + }, { name: "EnterNewTxnWithBeginStmt simple", request: &sessiontxn.EnterNewTxnRequest{ @@ -135,7 +157,7 @@ func TestEnterNewTxn(t *testing.T) { Type: sessiontxn.EnterNewTxnBeforeStmt, }) require.NoError(t, err) - require.NoError(t, mgr.OnStmtStart(context.TODO())) + require.NoError(t, mgr.OnStmtStart(context.TODO(), nil)) require.NoError(t, mgr.AdviseWarmup()) }, request: &sessiontxn.EnterNewTxnRequest{ @@ -229,6 +251,195 @@ func TestEnterNewTxn(t *testing.T) { } } +func TestGetSnapshot(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("use test") + tk.MustExec("use test") + tk.MustExec("create table t (id int primary key)") + + isSnapshotEqual := func(t *testing.T, snap1 kv.Snapshot, snap2 kv.Snapshot) bool { + require.NotNil(t, snap1) + require.NotNil(t, snap2) + + iter1, err := snap1.Iter([]byte{}, []byte{}) + require.NoError(t, err) + iter2, err := snap2.Iter([]byte{}, []byte{}) + require.NoError(t, err) + + for { + if iter1.Valid() && iter2.Valid() { + if iter1.Key().Cmp(iter2.Key()) != 0 { + return false + } + if !bytes.Equal(iter1.Value(), iter2.Value()) { + return false + } + err = iter1.Next() + require.NoError(t, err) + err = iter2.Next() + require.NoError(t, err) + } else if !iter1.Valid() && !iter2.Valid() { + return true + } else { + return false + } + } + } + + mgr := sessiontxn.GetTxnManager(tk.Session()) + + cases := []struct { + isolation string + prepare func(t *testing.T) + check func(t *testing.T, sctx sessionctx.Context) + }{ + { + isolation: "Pessimistic Repeatable Read", + prepare: func(t *testing.T) { + tk.MustExec("set @@tx_isolation='REPEATABLE-READ'") + tk.MustExec("begin pessimistic") + }, + check: func(t *testing.T, sctx sessionctx.Context) { + ts, err := mgr.GetStmtReadTS() + require.NoError(t, err) + compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err := 
mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + + tk2.MustExec("insert into t values(10)") + + tk.MustQuery("select * from t for update").Check(testkit.Rows("1", "3", "10")) + ts, err = mgr.GetStmtForUpdateTS() + require.NoError(t, err) + compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err = mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.False(t, isSnapshotEqual(t, compareSnap2, snap)) + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + + require.False(t, isSnapshotEqual(t, compareSnap, snap)) + }, + }, + { + isolation: "Pessimistic Read Committed", + prepare: func(t *testing.T) { + tk.MustExec("set tx_isolation = 'READ-COMMITTED'") + tk.MustExec("begin pessimistic") + }, + check: func(t *testing.T, sctx sessionctx.Context) { + ts, err := mgr.GetStmtReadTS() + require.NoError(t, err) + compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err := mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + + tk2.MustExec("insert into t values(10)") + + tk.MustQuery("select * from t").Check(testkit.Rows("1", "3", "10")) + ts, err = mgr.GetStmtForUpdateTS() + require.NoError(t, err) + compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err = mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + + require.False(t, isSnapshotEqual(t, compareSnap, snap)) + }, + }, + { + isolation: "Optimistic", + prepare: func(t *testing.T) { + tk.MustExec("begin optimistic") + }, + check: func(t *testing.T, sctx sessionctx.Context) { + ts, err := mgr.GetStmtReadTS() + require.NoError(t, err) + compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err := mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + + tk2.MustExec("insert into t values(10)") + + tk.MustQuery("select * from t for update").Check(testkit.Rows("1", "3")) + ts, err = mgr.GetStmtForUpdateTS() + require.NoError(t, err) + compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err = mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + }, + }, + { + isolation: "Pessimistic Serializable", + prepare: func(t *testing.T) { + tk.MustExec("set tidb_skip_isolation_level_check = 1") + tk.MustExec("set tx_isolation = 'SERIALIZABLE'") + tk.MustExec("begin pessimistic") + }, + check: func(t *testing.T, sctx sessionctx.Context) { + ts, err := mgr.GetStmtReadTS() + require.NoError(t, err) + compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err := mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + + tk2.MustExec("insert into t values(10)") + + tk.MustQuery("select * from t for update").Check(testkit.Rows("1", "3")) + ts, err = mgr.GetStmtForUpdateTS() + require.NoError(t, err) + compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) + snap, err = mgr.GetSnapshotWithStmtReadTS() + require.NoError(t, 
err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() + require.NoError(t, err) + require.True(t, isSnapshotEqual(t, compareSnap2, snap)) + + require.True(t, isSnapshotEqual(t, compareSnap, snap)) + }, + }, + } + + for _, c := range cases { + t.Run(c.isolation, func(t *testing.T) { + se := tk.Session() + tk.MustExec("truncate t") + tk.MustExec("set @@tidb_txn_mode=''") + tk.MustExec("set @@autocommit=1") + tk.MustExec("insert into t values(1), (3)") + tk.MustExec("commit") + + if c.prepare != nil { + c.prepare(t) + } + + if c.check != nil { + c.check(t, se) + } + tk.MustExec("rollback") + }) + } +} + func checkBasicActiveTxn(t *testing.T, sctx sessionctx.Context) kv.Transaction { txn, err := sctx.Txn(false) require.NoError(t, err) diff --git a/statistics/cmsketch.go b/statistics/cmsketch.go index 15308e3a84c7a..10505fb616ef0 100644 --- a/statistics/cmsketch.go +++ b/statistics/cmsketch.go @@ -441,6 +441,9 @@ func DecodeCMSketchAndTopN(data []byte, topNRows []chunk.Row) (*CMSketch, *TopN, // TotalCount returns the total count in the sketch, it is only used for test. func (c *CMSketch) TotalCount() uint64 { + if c == nil { + return 0 + } return c.count } @@ -625,8 +628,8 @@ func (c *TopN) Sort() { if c == nil { return } - sort.Slice(c.TopN, func(i, j int) bool { - return bytes.Compare(c.TopN[i].Encoded, c.TopN[j].Encoded) < 0 + slices.SortFunc(c.TopN, func(i, j TopNMeta) bool { + return bytes.Compare(i.Encoded, j.Encoded) < 0 }) } @@ -796,8 +799,8 @@ func MergePartTopN2GlobalTopN(sc *stmtctx.StatementContext, version int, topNs [ for i := 0; i < partNum; i++ { if len(removeVals[i]) > 0 { tmp := removeVals[i] - sort.Slice(tmp, func(i, j int) bool { - cmpResult := bytes.Compare(tmp[i].Encoded, tmp[j].Encoded) + slices.SortFunc(tmp, func(i, j TopNMeta) bool { + cmpResult := bytes.Compare(i.Encoded, j.Encoded) return cmpResult < 0 }) hists[i].RemoveVals(tmp) @@ -855,11 +858,11 @@ func checkEmptyTopNs(topNs []*TopN) bool { } func getMergedTopNFromSortedSlice(sorted []TopNMeta, n uint32) (*TopN, []TopNMeta) { - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].Count != sorted[j].Count { - return sorted[i].Count > sorted[j].Count + slices.SortFunc(sorted, func(i, j TopNMeta) bool { + if i.Count != j.Count { + return i.Count > j.Count } - return bytes.Compare(sorted[i].Encoded, sorted[j].Encoded) < 0 + return bytes.Compare(i.Encoded, j.Encoded) < 0 }) n = mathutil.Min(uint32(len(sorted)), n) diff --git a/statistics/feedback.go b/statistics/feedback.go index 3fc37a968fb1c..daf771d0f89a4 100644 --- a/statistics/feedback.go +++ b/statistics/feedback.go @@ -40,6 +40,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // Feedback represents the total scan count in range [lower, upper). @@ -350,15 +351,15 @@ func NonOverlappedFeedbacks(sc *stmtctx.StatementContext, fbs []Feedback) ([]Fee // Sort feedbacks by end point and start point incrementally, then pick every feedback that is not overlapped // with the previous chosen feedbacks. 
var existsErr bool - sort.Slice(fbs, func(i, j int) bool { - res, err := fbs[i].Upper.Compare(sc, fbs[j].Upper, collate.GetBinaryCollator()) + slices.SortFunc(fbs, func(i, j Feedback) bool { + res, err := i.Upper.Compare(sc, j.Upper, collate.GetBinaryCollator()) if err != nil { existsErr = true } if existsErr || res != 0 { return res < 0 } - res, err = fbs[i].Lower.Compare(sc, fbs[j].Lower, collate.GetBinaryCollator()) + res, err = i.Lower.Compare(sc, j.Lower, collate.GetBinaryCollator()) if err != nil { existsErr = true } @@ -726,7 +727,7 @@ func mergeBuckets(bkts []bucket, isNewBuckets []bool, bucketCount int, totalCoun for i := 0; i < mergeCount; i++ { ids = append(ids, bs[i].id) } - sort.Ints(ids) + slices.Sort(ids) idCursor, bktCursor := 0, 0 for i := range bkts { // Merge this bucket with last one. diff --git a/statistics/handle/BUILD.bazel b/statistics/handle/BUILD.bazel index 716c78f2708fc..db9a1d64930ff 100644 --- a/statistics/handle/BUILD.bazel +++ b/statistics/handle/BUILD.bazel @@ -49,6 +49,7 @@ go_library( "@com_github_pingcap_tipb//go-tipb", "@com_github_prometheus_client_golang//prometheus", "@com_github_tikv_client_go_v2//oracle", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], @@ -56,6 +57,7 @@ go_library( go_test( name = "handle_test", + timeout = "short", srcs = [ "ddl_test.go", "dump_test.go", diff --git a/statistics/handle/bootstrap.go b/statistics/handle/bootstrap.go index a2e5714380643..094a02bd1da79 100644 --- a/statistics/handle/bootstrap.go +++ b/statistics/handle/bootstrap.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -60,8 +61,9 @@ func (h *Handle) initStatsMeta4Chunk(is infoschema.InfoSchema, cache *statsCache } func (h *Handle) initStatsMeta(is infoschema.InfoSchema) (statsCache, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY version, table_id, modify_count, count from mysql.stats_meta" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return statsCache{}, errors.Trace(err) } @@ -70,7 +72,7 @@ func (h *Handle) initStatsMeta(is infoschema.InfoSchema) (statsCache, error) { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return statsCache{}, errors.Trace(err) } @@ -110,12 +112,14 @@ func (h *Handle) initStatsHistograms4Chunk(is infoschema.InfoSchema, cache *stat } hist := statistics.NewHistogram(id, ndv, nullCount, version, types.NewFieldType(mysql.TypeBlob), chunk.InitialCapacity, 0) index := &statistics.Index{ - Histogram: *hist, - CMSketch: cms, - TopN: topN, - Info: idxInfo, - StatsVer: statsVer, - Flag: row.GetInt64(10), + Histogram: *hist, + CMSketch: cms, + TopN: topN, + Info: idxInfo, + StatsVer: statsVer, + Flag: row.GetInt64(10), + PhysicalID: tblID, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } lastAnalyzePos.Copy(&index.LastAnalyzePos) table.Indices[hist.ID] = index @@ -159,8 +163,9 @@ func (h *Handle) initStatsHistograms4Chunk(is infoschema.InfoSchema, cache *stat } func (h *Handle) initStatsHistograms(is infoschema.InfoSchema, cache *statsCache) error { + ctx := 
kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, distinct_count, version, null_count, cm_sketch, tot_col_size, stats_ver, correlation, flag, last_analyze_pos from mysql.stats_histograms" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -168,7 +173,7 @@ func (h *Handle) initStatsHistograms(is infoschema.InfoSchema, cache *statsCache req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -205,8 +210,9 @@ func (h *Handle) initStatsTopN4Chunk(cache *statsCache, iter *chunk.Iterator4Chu } func (h *Handle) initStatsTopN(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, hist_id, value, count from mysql.stats_top_n where is_index = 1" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -214,7 +220,7 @@ func (h *Handle) initStatsTopN(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -253,8 +259,9 @@ func (h *Handle) initStatsFMSketch4Chunk(cache *statsCache, iter *chunk.Iterator } func (h *Handle) initStatsFMSketch(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, value from mysql.stats_fm_sketch" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -262,7 +269,7 @@ func (h *Handle) initStatsFMSketch(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -324,8 +331,9 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { // Before stats ver 2, histogram represents all data in this column. // In stats ver 2, histogram + TopN represent all data in this column. // So we need to add TopN total count here. + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) selSQL := "select sum(count) from mysql.stats_top_n where table_id = %? and is_index = 0 and hist_id = %?" 
- rs, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), selSQL, tableID, colID) + rs, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, selSQL, tableID, colID) if rs != nil { defer terror.Call(rs.Close) } @@ -334,7 +342,7 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { } req := rs.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) - err = rs.Next(context.TODO(), req) + err = rs.Next(ctx, req) if err != nil { return 0, err } @@ -345,8 +353,9 @@ func (h *Handle) initTopNCountSum(tableID, colID int64) (int64, error) { } func (h *Handle) initStatsBuckets(cache *statsCache) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select HIGH_PRIORITY table_id, is_index, hist_id, count, repeats, lower_bound, upper_bound, ndv from mysql.stats_buckets order by table_id, is_index, hist_id, bucket_id" - rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql) + rc, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) if err != nil { return errors.Trace(err) } @@ -354,7 +363,7 @@ func (h *Handle) initStatsBuckets(cache *statsCache) error { req := rc.NewChunk(nil) iter := chunk.NewIterator4Chunk(req) for { - err := rc.Next(context.TODO(), req) + err := rc.Next(ctx, req) if err != nil { return errors.Trace(err) } @@ -386,15 +395,16 @@ func (h *Handle) initStatsBuckets(cache *statsCache) error { // InitStats will init the stats cache using full load strategy. func (h *Handle) InitStats(is infoschema.InfoSchema) (err error) { loadFMSketch := config.GetGlobalConfig().Performance.EnableLoadFMSketch + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) h.mu.Lock() defer func() { - _, err1 := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "commit") + _, err1 := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") if err == nil && err1 != nil { err = err1 } h.mu.Unlock() }() - _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return err } diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index 497cb71a4a955..7e628a34c674a 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl/util" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -40,7 +41,7 @@ func (h *Handle) HandleDDLEvent(t *util.Event) error { return err } } - case model.ActionAddColumn, model.ActionAddColumns, model.ActionModifyColumn: + case model.ActionAddColumn, model.ActionModifyColumn: ids := h.getInitStateTableIDs(t.TableInfo) for _, id := range ids { if err := h.insertColStats2KV(id, t.ColumnInfos); err != nil { @@ -191,7 +192,7 @@ func (h *Handle) insertTableStats2KV(info *model.TableInfo, physicalID int64) (e }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -234,7 +235,7 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfos []*model.ColumnInf h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = 
exec.ExecuteInternal(ctx, "begin") if err != nil { diff --git a/statistics/handle/dump.go b/statistics/handle/dump.go index 088c1e494e7d1..4fd89b6c11b84 100644 --- a/statistics/handle/dump.go +++ b/statistics/handle/dump.go @@ -283,11 +283,13 @@ func TableStatsFromJSON(tableInfo *model.TableInfo, physicalID int64, jsonTbl *J statsVer = *jsonIdx.StatsVer } idx := &statistics.Index{ - Histogram: *hist, - CMSketch: cm, - TopN: topN, - Info: idxInfo, - StatsVer: statsVer, + Histogram: *hist, + CMSketch: cm, + TopN: topN, + Info: idxInfo, + StatsVer: statsVer, + PhysicalID: physicalID, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } tbl.Indices[idx.ID] = idx } @@ -322,15 +324,15 @@ func TableStatsFromJSON(tableInfo *model.TableInfo, physicalID int64, jsonTbl *J statsVer = *jsonCol.StatsVer } col := &statistics.Column{ - PhysicalID: physicalID, - Histogram: *hist, - CMSketch: cm, - TopN: topN, - FMSketch: fms, - Info: colInfo, - IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.GetFlag()), - StatsVer: statsVer, - ColLoadedStatus: statistics.NewColFullLoadStatus(), + PhysicalID: physicalID, + Histogram: *hist, + CMSketch: cm, + TopN: topN, + FMSketch: fms, + Info: colInfo, + IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.GetFlag()), + StatsVer: statsVer, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } col.Count = int64(col.TotalRowCount()) tbl.Columns[col.ID] = col diff --git a/statistics/handle/gc.go b/statistics/handle/gc.go index a3ac7b9191f4c..1babb4321eb9e 100644 --- a/statistics/handle/gc.go +++ b/statistics/handle/gc.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" "github.com/pingcap/tidb/util/sqlexec" @@ -142,7 +143,7 @@ func (h *Handle) deleteHistStatsFromKV(physicalID int64, histID int64, isIndex i h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -191,18 +192,18 @@ func (h *Handle) DeleteTableStatsFromKV(statsIDs []int64) (err error) { h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.SQLExecutor) - _, err = exec.ExecuteInternal(context.Background(), "begin") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { return errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { return errors.Trace(err) } - ctx := context.Background() startTS := txn.StartTS() for _, statsID := range statsIDs { // We only update the version so that other tidb will know that this table is deleted. 
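Note on the recurring context change in the hunks above: every internal statistics statement in bootstrap.go, ddl.go, gc.go and handle.go now runs under a context produced by kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) instead of a bare context.Background()/context.TODO(), so downstream layers can attribute the traffic to the stats subsystem. A minimal, hypothetical sketch of that context-tagging idea follows; the helper names (sourceKey, withInternalSource, sourceFrom) are illustrative stand-ins, not TiDB's actual kv/util APIs.

package main

import (
	"context"
	"fmt"
)

type sourceKey struct{}

// withInternalSource plays the role that kv.WithInternalSourceType plays in the
// diff: it returns a child context carrying an internal-transaction label.
func withInternalSource(ctx context.Context, src string) context.Context {
	return context.WithValue(ctx, sourceKey{}, src)
}

// sourceFrom is what a lower layer (for example, a coprocessor client filling a
// RequestSource field) would call to read the label back.
func sourceFrom(ctx context.Context) string {
	if v, ok := ctx.Value(sourceKey{}).(string); ok {
		return v
	}
	return "unknown"
}

func main() {
	ctx := withInternalSource(context.Background(), "internal_stats")
	fmt.Println(sourceFrom(ctx)) // internal_stats
}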
@@ -241,7 +242,7 @@ func (h *Handle) removeDeletedExtendedStats(version uint64) (err error) { h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.SQLExecutor) - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return errors.Trace(err) diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index 6bd49b1f12302..db7bc68deed58 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -18,7 +18,6 @@ import ( "context" "encoding/json" "fmt" - "sort" "strconv" "strings" "sync" @@ -31,6 +30,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -50,6 +50,7 @@ import ( "github.com/tikv/client-go/v2/oracle" atomic2 "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -127,12 +128,14 @@ func (h *Handle) withRestrictedSQLExecutor(ctx context.Context, fn func(context. } func (h *Handle) execRestrictedSQL(ctx context.Context, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { return exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}, sql, params...) }) } func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int, procTrackID uint64, sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { optFuncs := []sqlexec.OptionFuncAlias{ execOptionForAnalyze[statsVer], @@ -145,6 +148,7 @@ func (h *Handle) execRestrictedSQLWithStatsVer(ctx context.Context, statsVer int } func (h *Handle) execRestrictedSQLWithSnapshot(ctx context.Context, sql string, snapshot uint64, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) return h.withRestrictedSQLExecutor(ctx, func(ctx context.Context, exec sqlexec.RestrictedSQLExecutor) ([]chunk.Row, []*ast.ResultField, error) { optFuncs := []sqlexec.OptionFuncAlias{ sqlexec.ExecOptionWithSnapshot(snapshot), @@ -306,7 +310,7 @@ func (h *Handle) Update(is infoschema.InfoSchema, opts ...TableStatsOpt) error { } else { lastVersion = 0 } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) rows, _, err := h.execRestrictedSQL(ctx, "SELECT version, table_id, modify_count, count from mysql.stats_meta where version > %? order by version", lastVersion) if err != nil { return errors.Trace(err) @@ -631,9 +635,9 @@ func (h *Handle) updateStatsCache(newCache statsCache) (updated bool) { return } -// LoadNeededHistograms will load histograms for those needed columns. +// LoadNeededHistograms will load histograms for those needed columns/indices. 
func (h *Handle) LoadNeededHistograms() (err error) { - cols := statistics.HistogramNeededColumns.AllCols() + items := statistics.HistogramNeededItems.AllItems() reader, err := h.getGlobalStatsReader(0) if err != nil { return err @@ -647,64 +651,130 @@ func (h *Handle) LoadNeededHistograms() (err error) { }() loadFMSketch := config.GetGlobalConfig().Performance.EnableLoadFMSketch - for _, col := range cols { - oldCache := h.statsCache.Load().(statsCache) - tbl, ok := oldCache.Get(col.TableID) - if !ok { - continue - } - c, ok := tbl.Columns[col.ColumnID] - if !ok || !c.IsLoadNeeded() { - statistics.HistogramNeededColumns.Delete(col) - continue + for _, item := range items { + if !item.IsIndex { + err = h.loadNeededColumnHistograms(reader, item, loadFMSketch) + } else { + err = h.loadNeededIndexHistograms(reader, item, loadFMSketch) } - hg, err := h.histogramFromStorage(reader, col.TableID, c.ID, &c.Info.FieldType, c.Histogram.NDV, 0, c.LastUpdateVersion, c.NullCount, c.TotColSize, c.Correlation) if err != nil { - return errors.Trace(err) + return err } - cms, topN, err := h.cmSketchAndTopNFromStorage(reader, col.TableID, 0, col.ColumnID) + } + return nil +} + +func (h *Handle) loadNeededColumnHistograms(reader *statsReader, col model.TableItemID, loadFMSketch bool) (err error) { + oldCache := h.statsCache.Load().(statsCache) + tbl, ok := oldCache.Get(col.TableID) + if !ok { + return nil + } + c, ok := tbl.Columns[col.ID] + if !ok || !c.IsLoadNeeded() { + statistics.HistogramNeededItems.Delete(col) + return nil + } + hg, err := h.histogramFromStorage(reader, col.TableID, c.ID, &c.Info.FieldType, c.Histogram.NDV, 0, c.LastUpdateVersion, c.NullCount, c.TotColSize, c.Correlation) + if err != nil { + return errors.Trace(err) + } + cms, topN, err := h.cmSketchAndTopNFromStorage(reader, col.TableID, 0, col.ID) + if err != nil { + return errors.Trace(err) + } + var fms *statistics.FMSketch + if loadFMSketch { + fms, err = h.fmSketchFromStorage(reader, col.TableID, 0, col.ID) if err != nil { return errors.Trace(err) } - var fms *statistics.FMSketch - if loadFMSketch { - fms, err = h.fmSketchFromStorage(reader, col.TableID, 0, col.ColumnID) - if err != nil { - return errors.Trace(err) - } - } - rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ColumnID) + } + rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 0 and table_id = %? and hist_id = %?", col.TableID, col.ID) + if err != nil { + return errors.Trace(err) + } + if len(rows) == 0 { + logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", col.TableID), zap.Int64("hist_id", col.ID)) + return errors.Trace(errors.New(fmt.Sprintf("fail to get stats version for this histogram, table_id:%v, hist_id:%v", col.TableID, col.ID))) + } + colHist := &statistics.Column{ + PhysicalID: col.TableID, + Histogram: *hg, + Info: c.Info, + CMSketch: cms, + TopN: topN, + FMSketch: fms, + IsHandle: c.IsHandle, + StatsVer: rows[0].GetInt64(0), + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), + } + // Column.Count is calculated by Column.TotalRowCount(). Hence we don't set Column.Count when initializing colHist. + colHist.Count = int64(colHist.TotalRowCount()) + // Reload the latest stats cache, otherwise the `updateStatsCache` may fail with high probability, because functions + // like `GetPartitionStats` called in `fmSketchFromStorage` would have modified the stats cache already. 
+ oldCache = h.statsCache.Load().(statsCache) + tbl, ok = oldCache.Get(col.TableID) + if !ok { + return nil + } + tbl = tbl.Copy() + tbl.Columns[c.ID] = colHist + if h.updateStatsCache(oldCache.update([]*statistics.Table{tbl}, nil, oldCache.version)) { + statistics.HistogramNeededItems.Delete(col) + } + return nil +} + +func (h *Handle) loadNeededIndexHistograms(reader *statsReader, idx model.TableItemID, loadFMSketch bool) (err error) { + oldCache := h.statsCache.Load().(statsCache) + tbl, ok := oldCache.Get(idx.TableID) + if !ok { + return nil + } + index, ok := tbl.Indices[idx.ID] + if !ok { + statistics.HistogramNeededItems.Delete(idx) + return nil + } + hg, err := h.histogramFromStorage(reader, idx.TableID, index.ID, types.NewFieldType(mysql.TypeBlob), index.Histogram.NDV, 1, index.LastUpdateVersion, index.NullCount, index.TotColSize, index.Correlation) + if err != nil { + return errors.Trace(err) + } + cms, topN, err := h.cmSketchAndTopNFromStorage(reader, idx.TableID, 1, idx.ID) + if err != nil { + return errors.Trace(err) + } + var fms *statistics.FMSketch + if loadFMSketch { + fms, err = h.fmSketchFromStorage(reader, idx.TableID, 1, idx.ID) if err != nil { return errors.Trace(err) } - if len(rows) == 0 { - logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", col.TableID), zap.Int64("hist_id", col.ColumnID)) - } - colHist := &statistics.Column{ - PhysicalID: col.TableID, - Histogram: *hg, - Info: c.Info, - CMSketch: cms, - TopN: topN, - FMSketch: fms, - IsHandle: c.IsHandle, - StatsVer: rows[0].GetInt64(0), - ColLoadedStatus: statistics.NewColFullLoadStatus(), - } - // Column.Count is calculated by Column.TotalRowCount(). Hence we don't set Column.Count when initializing colHist. - colHist.Count = int64(colHist.TotalRowCount()) - // Reload the latest stats cache, otherwise the `updateStatsCache` may fail with high probability, because functions - // like `GetPartitionStats` called in `fmSketchFromStorage` would have modified the stats cache already. - oldCache = h.statsCache.Load().(statsCache) - tbl, ok = oldCache.Get(col.TableID) - if !ok { - continue - } - tbl = tbl.Copy() - tbl.Columns[c.ID] = colHist - if h.updateStatsCache(oldCache.update([]*statistics.Table{tbl}, nil, oldCache.version)) { - statistics.HistogramNeededColumns.Delete(col) - } + } + rows, _, err := reader.read("select stats_ver from mysql.stats_histograms where is_index = 1 and table_id = %? 
and hist_id = %?", idx.TableID, idx.ID) + if err != nil { + return errors.Trace(err) + } + if len(rows) == 0 { + logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", idx.TableID), zap.Int64("hist_id", idx.ID)) + return errors.Trace(errors.New(fmt.Sprintf("fail to get stats version for this histogram, table_id:%v, hist_id:%v", idx.TableID, idx.ID))) + } + idxHist := &statistics.Index{Histogram: *hg, CMSketch: cms, TopN: topN, FMSketch: fms, + Info: index.Info, ErrorRate: index.ErrorRate, StatsVer: rows[0].GetInt64(0), + Flag: index.Flag, PhysicalID: idx.TableID, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus()} + index.LastAnalyzePos.Copy(&idxHist.LastAnalyzePos) + + oldCache = h.statsCache.Load().(statsCache) + tbl, ok = oldCache.Get(idx.TableID) + if !ok { + return nil + } + tbl = tbl.Copy() + tbl.Indices[idx.ID] = idxHist + if h.updateStatsCache(oldCache.update([]*statistics.Table{tbl}, nil, oldCache.version)) { + statistics.HistogramNeededItems.Delete(idx) } return nil } @@ -790,7 +860,10 @@ func (h *Handle) indexStatsFromStorage(reader *statsReader, row chunk.Row, table if err != nil { return errors.Trace(err) } - idx = &statistics.Index{Histogram: *hg, CMSketch: cms, TopN: topN, FMSketch: fmSketch, Info: idxInfo, ErrorRate: errorRate, StatsVer: row.GetInt64(7), Flag: flag} + idx = &statistics.Index{Histogram: *hg, CMSketch: cms, TopN: topN, FMSketch: fmSketch, + Info: idxInfo, ErrorRate: errorRate, StatsVer: row.GetInt64(7), Flag: flag, + PhysicalID: table.PhysicalID, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus()} lastAnalyzePos.Copy(&idx.LastAnalyzePos) } break @@ -869,17 +942,17 @@ func (h *Handle) columnStatsFromStorage(reader *statsReader, row chunk.Row, tabl return errors.Trace(err) } col = &statistics.Column{ - PhysicalID: table.PhysicalID, - Histogram: *hg, - Info: colInfo, - CMSketch: cms, - TopN: topN, - FMSketch: fmSketch, - ErrorRate: errorRate, - IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.GetFlag()), - Flag: flag, - StatsVer: statsVer, - ColLoadedStatus: statistics.NewColFullLoadStatus(), + PhysicalID: table.PhysicalID, + Histogram: *hg, + Info: colInfo, + CMSketch: cms, + TopN: topN, + FMSketch: fmSketch, + ErrorRate: errorRate, + IsHandle: tableInfo.PKIsHandle && mysql.HasPriKeyFlag(colInfo.GetFlag()), + Flag: flag, + StatsVer: statsVer, + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } // Column.Count is calculated by Column.TotalRowCount(). Hence we don't set Column.Count when initializing col. 
col.Count = int64(col.TotalRowCount()) @@ -1044,14 +1117,14 @@ func (h *Handle) SaveTableStatsToStorage(results *statistics.AnalyzeResults, nee }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return err } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { @@ -1254,14 +1327,14 @@ func (h *Handle) SaveStatsToStorage(tableID int64, count int64, isIndex int, hg }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { return errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) if err != nil { @@ -1355,7 +1428,7 @@ func (h *Handle) SaveMetaToStorage(tableID, count, modifyCount int64) (err error }() h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { @@ -1450,7 +1523,7 @@ func (h *Handle) columnCountFromStorage(reader *statsReader, tableID, colID, sta } func (h *Handle) statsMetaByTableIDFromStorage(tableID int64, snapshot uint64) (version uint64, modifyCount, count int64, err error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) var rows []chunk.Row if snapshot == 0 { rows, _, err = h.execRestrictedSQL(ctx, "SELECT version, modify_count, count from mysql.stats_meta where table_id = %? order by version", tableID) @@ -1477,7 +1550,7 @@ type statsReader struct { } func (sr *statsReader) read(sql string, args ...interface{}) (rows []chunk.Row, fields []*ast.ResultField, err error) { - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) if sr.snapshot > 0 { return sr.ctx.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseSessionPool, sqlexec.ExecOptionWithSnapshot(sr.snapshot)}, sql, args...) 
} @@ -1506,33 +1579,35 @@ func (h *Handle) releaseGlobalStatsReader(reader *statsReader) error { return h.releaseStatsReader(reader, h.mu.ctx.(sqlexec.RestrictedSQLExecutor)) } -func (h *Handle) getStatsReader(snapshot uint64, ctx sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { +func (h *Handle) getStatsReader(snapshot uint64, exec sqlexec.RestrictedSQLExecutor) (reader *statsReader, err error) { failpoint.Inject("mockGetStatsReaderFail", func(val failpoint.Value) { if val.(bool) { failpoint.Return(nil, errors.New("gofail genStatsReader error")) } }) if snapshot > 0 { - return &statsReader{ctx: ctx, snapshot: snapshot}, nil + return &statsReader{ctx: exec, snapshot: snapshot}, nil } defer func() { if r := recover(); r != nil { err = fmt.Errorf("getStatsReader panic %v", r) } }() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) failpoint.Inject("mockGetStatsReaderPanic", nil) - _, err = ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "begin") + _, err = exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") if err != nil { return nil, err } - return &statsReader{ctx: ctx}, nil + return &statsReader{ctx: exec}, nil } -func (h *Handle) releaseStatsReader(reader *statsReader, ctx sqlexec.RestrictedSQLExecutor) error { +func (h *Handle) releaseStatsReader(reader *statsReader, exec sqlexec.RestrictedSQLExecutor) error { if reader.snapshot > 0 { return nil } - _, err := ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), "commit") + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, err := exec.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "commit") return err } @@ -1553,7 +1628,7 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t err = h.recordHistoricalStatsMeta(tableID, statsVer) } }() - sort.Slice(colIDs, func(i, j int) bool { return colIDs[i] < colIDs[j] }) + slices.Sort(colIDs) bytes, err := json.Marshal(colIDs) if err != nil { return errors.Trace(err) @@ -1561,7 +1636,7 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t strColIDs := string(bytes) h.mu.Lock() defer h.mu.Unlock() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { @@ -1624,7 +1699,7 @@ func (h *Handle) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExi err = h.recordHistoricalStatsMeta(tableID, statsVer) } }() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) rows, _, err := h.execRestrictedSQL(ctx, "SELECT name FROM mysql.stats_extended WHERE name = %? and table_id = %? and status in (%?, %?)", statsName, tableID, StatsStatusInited, StatsStatusAnalyzed) if err != nil { return errors.Trace(err) @@ -1721,7 +1796,7 @@ func (h *Handle) ReloadExtendedStatistics() error { // BuildExtendedStats build extended stats for column groups if needed based on the column samples. func (h *Handle) BuildExtendedStats(tableID int64, cols []*model.ColumnInfo, collectors []*statistics.SampleCollector) (*statistics.ExtendedStatsColl, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? 
and status in (%?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, StatsStatusAnalyzed, StatsStatusInited) if err != nil { @@ -1842,7 +1917,7 @@ func (h *Handle) SaveExtendedStatsToStorage(tableID int64, extStats *statistics. } h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { @@ -1918,7 +1993,8 @@ type colStatsTimeInfo struct { // getDisableColumnTrackingTime reads the value of tidb_disable_column_tracking_time from mysql.tidb if it exists. func (h *Handle) getDisableColumnTrackingTime() (*time.Time, error) { - rows, fields, err := h.execRestrictedSQL(context.Background(), "SELECT variable_value FROM %n.%n WHERE variable_name = %?", mysql.SystemDB, mysql.TiDBTable, variable.TiDBDisableColumnTrackingTime) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, fields, err := h.execRestrictedSQL(ctx, "SELECT variable_value FROM %n.%n WHERE variable_name = %?", mysql.SystemDB, mysql.TiDBTable, variable.TiDBDisableColumnTrackingTime) if err != nil { return nil, err } @@ -1944,8 +2020,9 @@ func (h *Handle) LoadColumnStatsUsage(loc *time.Location) (map[model.TableColumn if err != nil { return nil, errors.Trace(err) } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) // Since we use another session from session pool to read mysql.column_stats_usage, which may have different @@time_zone, so we do time zone conversion here. - rows, _, err := h.execRestrictedSQL(context.Background(), "SELECT table_id, column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00'), CONVERT_TZ(last_analyzed_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage") + rows, _, err := h.execRestrictedSQL(ctx, "SELECT table_id, column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00'), CONVERT_TZ(last_analyzed_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage") if err != nil { return nil, errors.Trace(err) } @@ -1983,7 +2060,7 @@ func (h *Handle) LoadColumnStatsUsage(loc *time.Location) (map[model.TableColumn // CollectColumnsInExtendedStats returns IDs of the columns involved in extended stats. func (h *Handle) CollectColumnsInExtendedStats(tableID int64) ([]int64, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, tableID, StatsStatusAnalyzed, StatsStatusInited) if err != nil { @@ -2012,7 +2089,8 @@ func (h *Handle) GetPredicateColumns(tableID int64) ([]int64, error) { if err != nil { return nil, errors.Trace(err) } - rows, _, err := h.execRestrictedSQL(context.Background(), "SELECT column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage WHERE table_id = %? AND last_used_at IS NOT NULL", tableID) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + rows, _, err := h.execRestrictedSQL(ctx, "SELECT column_id, CONVERT_TZ(last_used_at, @@TIME_ZONE, '+00:00') FROM mysql.column_stats_usage WHERE table_id = %? 
AND last_used_at IS NOT NULL", tableID) if err != nil { return nil, errors.Trace(err) } @@ -2040,7 +2118,7 @@ const maxColumnSize = 6 << 20 // RecordHistoricalStatsToStorage records the given table's stats data to mysql.stats_history func (h *Handle) RecordHistoricalStatsToStorage(dbName string, tableInfo *model.TableInfo) (uint64, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) js, err := h.DumpStatsToJSON(dbName, tableInfo, nil) if err != nil { return 0, errors.Trace(err) @@ -2100,7 +2178,7 @@ func (h *Handle) recordHistoricalStatsMeta(tableID int64, version uint64) error return nil } - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) h.mu.Lock() defer h.mu.Unlock() rows, _, err := h.execRestrictedSQL(ctx, "select modify_count, count from mysql.stats_meta where table_id = %? and version = %?", tableID, version) @@ -2133,7 +2211,7 @@ func (h *Handle) InsertAnalyzeJob(job *statistics.AnalyzeJob, instance string, p h.mu.Lock() defer h.mu.Unlock() exec := h.mu.ctx.(sqlexec.RestrictedSQLExecutor) - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) jobInfo := job.JobInfo const textMaxLength = 65535 if len(jobInfo) > textMaxLength { @@ -2156,7 +2234,8 @@ func (h *Handle) InsertAnalyzeJob(job *statistics.AnalyzeJob, instance string, p // DeleteAnalyzeJobs deletes the analyze jobs whose update time is earlier than updateTime. func (h *Handle) DeleteAnalyzeJobs(updateTime time.Time) error { - _, _, err := h.execRestrictedSQL(context.TODO(), "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", updateTime.UTC().Format(types.TimeFormat)) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) + _, _, err := h.execRestrictedSQL(ctx, "DELETE FROM mysql.analyze_jobs WHERE update_time < CONVERT_TZ(%?, '+00:00', @@TIME_ZONE)", updateTime.UTC().Format(types.TimeFormat)) return err } diff --git a/statistics/handle/handle_hist.go b/statistics/handle/handle_hist.go index 2dcccadce1f76..36af151ce19db 100644 --- a/statistics/handle/handle_hist.go +++ b/statistics/handle/handle_hist.go @@ -274,15 +274,15 @@ func (h *Handle) readStatsForOne(col model.TableColumnID, c *statistics.Column, logutil.BgLogger().Error("fail to get stats version for this histogram", zap.Int64("table_id", col.TableID), zap.Int64("hist_id", col.ColumnID)) } colHist := &statistics.Column{ - PhysicalID: col.TableID, - Histogram: *hg, - Info: c.Info, - CMSketch: cms, - TopN: topN, - FMSketch: fms, - IsHandle: c.IsHandle, - StatsVer: rows[0].GetInt64(0), - ColLoadedStatus: statistics.NewColFullLoadStatus(), + PhysicalID: col.TableID, + Histogram: *hg, + Info: c.Info, + CMSketch: cms, + TopN: topN, + FMSketch: fms, + IsHandle: c.IsHandle, + StatsVer: rows[0].GetInt64(0), + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } // Column.Count is calculated by Column.TotalRowCount(). Hence, we don't set Column.Count when initializing colHist. 
colHist.Count = int64(colHist.TotalRowCount()) diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index b18d14236bd36..fa5e26d589bcc 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -613,6 +613,25 @@ func TestLoadStats(t *testing.T) { stat = h.GetTableStats(tableInfo) hg = stat.Columns[tableInfo.Columns[2].ID].Histogram require.Greater(t, hg.Len(), 0) + + // assert index LoadNeededHistograms + idx := stat.Indices[tableInfo.Indices[0].ID] + idx.EvictAllStats() + hg = idx.Histogram + cms = idx.CMSketch + topN = idx.TopN + require.Equal(t, float64(cms.TotalCount()+topN.TotalCount())+hg.TotalRowCount(), float64(0)) + require.False(t, idx.IsEssentialStatsLoaded()) + idx.IsInvalid(false) + require.NoError(t, h.LoadNeededHistograms()) + stat = h.GetTableStats(tableInfo) + idx = stat.Indices[tableInfo.Indices[0].ID] + hg = idx.Histogram + cms = idx.CMSketch + topN = idx.TopN + require.Greater(t, float64(cms.TotalCount()+topN.TotalCount())+hg.TotalRowCount(), float64(0)) + require.True(t, idx.IsFullLoad()) + // Following test tests whether the LoadNeededHistograms would panic. require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/statistics/handle/mockGetStatsReaderFail", `return(true)`)) err = h.LoadNeededHistograms() @@ -2554,18 +2573,6 @@ func TestHideIndexUsageSyncLease(t *testing.T) { } } -func TestHideExtendedStatsSwitch(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - // NOTICE: remove this test when this extended-stats reaches GA state. - tk := testkit.NewTestKit(t, store) - rs := tk.MustQuery("show variables").Rows() - for _, r := range rs { - require.NotEqual(t, "tidb_enable_extended_stats", strings.ToLower(r[0].(string))) - } - tk.MustQuery("show variables like 'tidb_enable_extended_stats'").Check(testkit.Rows()) -} - func TestRepetitiveAddDropExtendedStats(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/statistics/handle/lru_cache_test.go b/statistics/handle/lru_cache_test.go index ffb39c593cd2a..d717b95643c23 100644 --- a/statistics/handle/lru_cache_test.go +++ b/statistics/handle/lru_cache_test.go @@ -36,9 +36,9 @@ func newMockStatisticsTable(columns int, indices int) *statistics.Table { t.Indices = make(map[int64]*statistics.Index) for i := 1; i <= columns; i++ { t.Columns[int64(i)] = &statistics.Column{ - Info: &model.ColumnInfo{ID: int64(i)}, - CMSketch: statistics.NewCMSketch(1, 1), - ColLoadedStatus: statistics.NewColFullLoadStatus(), + Info: &model.ColumnInfo{ID: int64(i)}, + CMSketch: statistics.NewCMSketch(1, 1), + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } } for i := 1; i <= indices; i++ { diff --git a/statistics/handle/update.go b/statistics/handle/update.go index 29976a0e24164..d3145e623fa58 100644 --- a/statistics/handle/update.go +++ b/statistics/handle/update.go @@ -20,7 +20,6 @@ import ( "fmt" "math" "math/rand" - "sort" "strconv" "strings" "sync" @@ -49,6 +48,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) type tableDeltaMap map[int64]variable.TableDelta @@ -345,7 +345,7 @@ const batchInsertSize = 10 // DumpIndexUsageToKV will dump in-memory index usage information to KV. 
func (h *Handle) DumpIndexUsageToKV() error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) mapper := h.sweepIdxUsageList() type FullIndexUsageInformation struct { id GlobalIndexID @@ -519,14 +519,14 @@ func (h *Handle) dumpTableStatCountToKV(id int64, delta variable.TableDelta) (up } h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin") if err != nil { return false, errors.Trace(err) } defer func() { - err = finishTransaction(context.Background(), exec, err) + err = finishTransaction(ctx, exec, err) }() txn, err := h.mu.ctx.Txn(true) @@ -579,9 +579,10 @@ func (h *Handle) dumpTableStatColSizeToKV(id int64, delta variable.TableDelta) e if len(values) == 0 { return nil } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := fmt.Sprintf("insert into mysql.stats_histograms (table_id, is_index, hist_id, distinct_count, tot_col_size) "+ "values %s on duplicate key update tot_col_size = tot_col_size + values(tot_col_size)", strings.Join(values, ",")) - _, _, err := h.execRestrictedSQL(context.Background(), sql) + _, _, err := h.execRestrictedSQL(ctx, sql) return errors.Trace(err) } @@ -631,9 +632,10 @@ func (h *Handle) DumpFeedbackToKV(fb *statistics.QueryFeedback) error { if fb.Tp == statistics.IndexType { isIndex = 1 } + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) const sql = "insert into mysql.stats_feedback (table_id, hist_id, is_index, feedback) values (%?, %?, %?, %?)" h.mu.Lock() - _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, fb.PhysicalID, fb.Hist.ID, isIndex, vals) + _, err = h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, fb.PhysicalID, fb.Hist.ID, isIndex, vals) h.mu.Unlock() if err != nil { metrics.DumpFeedbackCounter.WithLabelValues(metrics.LblError).Inc() @@ -751,7 +753,7 @@ func (h *Handle) UpdateErrorRate(is infoschema.InfoSchema) { // HandleUpdateStats update the stats using feedback. func (h *Handle) HandleUpdateStats(is infoschema.InfoSchema) error { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) tables, _, err := h.execRestrictedSQL(ctx, "SELECT distinct table_id from mysql.stats_feedback") if err != nil { return errors.Trace(err) @@ -866,9 +868,10 @@ func (h *Handle) deleteOutdatedFeedback(tableID, histID, isIndex int64) error { h.mu.Lock() defer h.mu.Unlock() hasData := true + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) for hasData { sql := "delete from mysql.stats_feedback where table_id = %? and hist_id = %? and is_index = %? 
limit 10000" - _, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(context.TODO(), sql, tableID, histID, isIndex) + _, err := h.mu.ctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql, tableID, histID, isIndex) if err != nil { return errors.Trace(err) } @@ -908,11 +911,11 @@ func (h *Handle) DumpColStatsUsageToKV() error { for id, t := range colMap { pairs = append(pairs, pair{tblColID: id, lastUsedAt: t.UTC().Format(types.TimeFormat)}) } - sort.Slice(pairs, func(i, j int) bool { - if pairs[i].tblColID.TableID == pairs[j].tblColID.TableID { - return pairs[i].tblColID.ColumnID < pairs[j].tblColID.ColumnID + slices.SortFunc(pairs, func(i, j pair) bool { + if i.tblColID.TableID == j.tblColID.TableID { + return i.tblColID.ColumnID < j.tblColID.ColumnID } - return pairs[i].tblColID.TableID < pairs[j].tblColID.TableID + return i.tblColID.TableID < j.tblColID.TableID }) // Use batch insert to reduce cost. for i := 0; i < len(pairs); i += batchInsertSize { @@ -995,7 +998,7 @@ func NeedAnalyzeTable(tbl *statistics.Table, limit time.Duration, autoAnalyzeRat } func (h *Handle) getAutoAnalyzeParameters() map[string]string { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) sql := "select variable_name, variable_value from mysql.global_variables where variable_name in (%?, %?, %?)" rows, _, err := h.execRestrictedSQL(ctx, sql, variable.TiDBAutoAnalyzeRatio, variable.TiDBAutoAnalyzeStartTime, variable.TiDBAutoAnalyzeEndTime) if err != nil { diff --git a/statistics/histogram.go b/statistics/histogram.go index 94377998c3421..273f46614c243 100644 --- a/statistics/histogram.go +++ b/statistics/histogram.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tipb/go-tipb" "github.com/twmb/murmur3" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // Histogram represents statistics for a column or index. @@ -392,8 +393,8 @@ func (hg *Histogram) RemoveVals(valCntPairs []TopNMeta) { // AddIdxVals adds the given values to the histogram. func (hg *Histogram) AddIdxVals(idxValCntPairs []TopNMeta) { totalAddCnt := int64(0) - sort.Slice(idxValCntPairs, func(i, j int) bool { - return bytes.Compare(idxValCntPairs[i].Encoded, idxValCntPairs[j].Encoded) < 0 + slices.SortFunc(idxValCntPairs, func(i, j TopNMeta) bool { + return bytes.Compare(i.Encoded, j.Encoded) < 0 }) for bktIdx, pairIdx := 0, 0; bktIdx < hg.Len(); bktIdx++ { for pairIdx < len(idxValCntPairs) { @@ -1057,8 +1058,8 @@ type Column struct { LastAnalyzePos types.Datum StatsVer int64 // StatsVer is the version of the current stats, used to maintain compatibility - // ColLoadedStatus indicates the status of column statistics - ColLoadedStatus + // StatsLoadedStatus indicates the status of column statistics + StatsLoadedStatus } func (c *Column) String() string { @@ -1114,9 +1115,9 @@ func (c *Column) MemoryUsage() CacheItemMemoryUsage { return columnMemUsage } -// HistogramNeededColumns stores the columns whose Histograms need to be loaded from physical kv layer. +// HistogramNeededItems stores the columns/indices whose Histograms need to be loaded from physical kv layer. // Currently, we only load index/pk's Histogram from kv automatically. Columns' are loaded by needs. -var HistogramNeededColumns = neededColumnMap{cols: map[tableColumnID]struct{}{}} +var HistogramNeededItems = neededStatsMap{items: map[model.TableItemID]struct{}{}} // IsInvalid checks if this column is invalid. If this column has histogram but not loaded yet, then we mark it // as need histogram. 
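The histogram.go hunk above generalizes the lazy-load registry from columns to both columns and indices: HistogramNeededColumns (keyed by tableColumnID) becomes HistogramNeededItems (keyed by model.TableItemID with an IsIndex flag), and ColLoadedStatus becomes the shared StatsLoadedStatus. A condensed, self-contained illustration of that registry pattern, with the type trimmed to the methods this patch relies on (insert on a cache miss, drain in LoadNeededHistograms):

package main

import (
	"fmt"
	"sync"
)

// itemID mirrors model.TableItemID: a table plus a histogram ID that may
// belong to either a column or an index.
type itemID struct {
	TableID int64
	ID      int64
	IsIndex bool
}

// neededStatsMap is a mutex-guarded set of items whose histograms still have
// to be loaded from the KV layer, trimmed from statistics/table.go further down.
type neededStatsMap struct {
	m     sync.RWMutex
	items map[itemID]struct{}
}

func (n *neededStatsMap) insert(it itemID) {
	n.m.Lock()
	n.items[it] = struct{}{}
	n.m.Unlock()
}

func (n *neededStatsMap) allItems() []itemID {
	n.m.RLock()
	defer n.m.RUnlock()
	keys := make([]itemID, 0, len(n.items))
	for k := range n.items {
		keys = append(keys, k)
	}
	return keys
}

func (n *neededStatsMap) delete(it itemID) {
	n.m.Lock()
	delete(n.items, it)
	n.m.Unlock()
}

func main() {
	reg := &neededStatsMap{items: map[itemID]struct{}{}}
	// An estimator that finds an index without loaded stats registers it ...
	reg.insert(itemID{TableID: 42, ID: 1, IsIndex: true})
	// ... and the background loader later drains the set, item by item.
	for _, it := range reg.allItems() {
		fmt.Printf("load histogram for table %d, hist %d, index=%v\n", it.TableID, it.ID, it.IsIndex)
		reg.delete(it)
	}
}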
@@ -1136,7 +1137,7 @@ func (c *Column) IsInvalid(sctx sessionctx.Context, collPseudo bool) bool { } // In some tests, the c.Info is not set, so we add this check here. if c.Info != nil { - HistogramNeededColumns.insert(tableColumnID{TableID: c.PhysicalID, ColumnID: c.Info.ID}) + HistogramNeededItems.insert(model.TableItemID{TableID: c.PhysicalID, ID: c.Info.ID, IsIndex: false}) } } } @@ -1332,6 +1333,8 @@ type Index struct { Info *model.IndexInfo Flag int64 LastAnalyzePos types.Datum + PhysicalID int64 + StatsLoadedStatus } // ItemID implements TableCacheItem @@ -1361,6 +1364,7 @@ func (idx *Index) String() string { // TotalRowCount returns the total count of this index. func (idx *Index) TotalRowCount() float64 { + idx.checkStats() if idx.StatsVer >= Version2 { return idx.Histogram.TotalRowCount() + float64(idx.TopN.TotalCount()) } @@ -1369,9 +1373,21 @@ func (idx *Index) TotalRowCount() float64 { // IsInvalid checks if this index is invalid. func (idx *Index) IsInvalid(collPseudo bool) bool { + if !collPseudo { + idx.checkStats() + } return (collPseudo && idx.NotAccurate()) || idx.TotalRowCount() == 0 } +// EvictAllStats evicts all stats +// Note that this function is only used for test +func (idx *Index) EvictAllStats() { + idx.Buckets = nil + idx.CMSketch = nil + idx.TopN = nil + idx.StatsLoadedStatus.evictedStatus = allEvicted +} + // MemoryUsage returns the total memory usage of a Histogram and CMSketch in Index. // We ignore the size of other metadata in Index. func (idx *Index) MemoryUsage() CacheItemMemoryUsage { @@ -1431,6 +1447,7 @@ func (idx *Index) equalRowCount(b []byte, realtimeRowCount int64) float64 { // QueryBytes is used to query the count of specified bytes. func (idx *Index) QueryBytes(d []byte) uint64 { + idx.checkStats() h1, h2 := murmur3.Sum128(d) if count, ok := idx.TopN.QueryTopN(d); ok { return count @@ -1441,6 +1458,7 @@ func (idx *Index) QueryBytes(d []byte) uint64 { // GetRowCount returns the row count of the given ranges. // It uses the modifyCount to adjust the influence of modifications on the table. func (idx *Index) GetRowCount(sctx sessionctx.Context, coll *HistColl, indexRanges []*ranger.Range, realtimeRowCount int64) (float64, error) { + idx.checkStats() sc := sctx.GetSessionVars().StmtCtx totalCount := float64(0) isSingleCol := len(idx.Info.Columns) == 1 @@ -1585,9 +1603,7 @@ func (idx *Index) expBackoffEstimation(sctx sessionctx.Context, coll *HistColl, singleColumnEstResults = append(singleColumnEstResults, count) } // Sort them. - sort.Slice(singleColumnEstResults, func(i, j int) bool { - return singleColumnEstResults[i] < singleColumnEstResults[j] - }) + slices.Sort(singleColumnEstResults) l := len(singleColumnEstResults) // Convert the first 4 to selectivity results. for i := 0; i < l && i < 4; i++ { @@ -1609,6 +1625,13 @@ func (idx *Index) expBackoffEstimation(sctx sessionctx.Context, coll *HistColl, return singleColumnEstResults[0] * math.Sqrt(singleColumnEstResults[1]) * math.Sqrt(math.Sqrt(singleColumnEstResults[2])) * math.Sqrt(math.Sqrt(math.Sqrt(singleColumnEstResults[3]))), true, nil } +func (idx *Index) checkStats() { + if idx.IsFullLoad() { + return + } + HistogramNeededItems.insert(model.TableItemID{TableID: idx.PhysicalID, ID: idx.Info.ID, IsIndex: true}) +} + type countByRangeFunc = func(sessionctx.Context, int64, []*ranger.Range) (float64, error) // newHistogramBySelectivity fulfills the content of new histogram by the given selectivity result. 
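The expBackoffEstimation hunk above keeps the existing damping rule, i.e. sort the per-column estimates ascending and multiply them with exponentially decaying weights (exponents 1, 1/2, 1/4, 1/8), and only swaps the index-based sort.Slice closure for the generic slices.Sort from golang.org/x/exp/slices. A standalone sketch of that combination step, assuming the inputs are already normalized selectivities in [0, 1]:

package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

// combineWithExpBackoff mirrors the return expression in expBackoffEstimation:
// the most selective estimate counts fully, the next at its square root, then
// the fourth and eighth roots, so several correlated columns do not drive the
// combined selectivity toward zero too aggressively.
func combineWithExpBackoff(estimates []float64) float64 {
	slices.Sort(estimates) // ascending; replaces the old sort.Slice closure
	result := 1.0
	for i, sel := range estimates {
		if i >= 4 {
			break
		}
		// The i-th factor is sel^(1/2^i): exponents 1, 1/2, 1/4, 1/8.
		result *= math.Pow(sel, 1/math.Pow(2, float64(i)))
	}
	return result
}

func main() {
	fmt.Printf("%.4f\n", combineWithExpBackoff([]float64{0.5, 0.1, 0.2}))
}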
@@ -1659,7 +1682,7 @@ func (idx *Index) newIndexBySelectivity(sc *stmtctx.StatementContext, statsNode ranLowEncode, ranHighEncode []byte err error ) - newIndexHist := &Index{Info: idx.Info, StatsVer: idx.StatsVer, CMSketch: idx.CMSketch} + newIndexHist := &Index{Info: idx.Info, StatsVer: idx.StatsVer, CMSketch: idx.CMSketch, PhysicalID: idx.PhysicalID} newIndexHist.Histogram = *NewHistogram(idx.ID, int64(float64(idx.NDV)*statsNode.Selectivity), 0, 0, types.NewFieldType(mysql.TypeBlob), chunk.InitialCapacity, 0) lowBucketIdx, highBucketIdx := 0, 0 @@ -1766,7 +1789,7 @@ func (coll *HistColl) NewHistCollBySelectivity(sctx sessionctx.Context, statsNod zap.Error(err)) continue } - newCol.ColLoadedStatus = oldCol.ColLoadedStatus + newCol.StatsLoadedStatus = oldCol.StatsLoadedStatus newColl.Columns[node.ID] = newCol } for id, idx := range coll.Indices { @@ -2166,15 +2189,15 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog buckets = buckets[:tail] var sortError error - sort.Slice(buckets, func(i, j int) bool { - res, err := buckets[i].upper.Compare(sc, buckets[j].upper, collate.GetBinaryCollator()) + slices.SortFunc(buckets, func(i, j *bucket4Merging) bool { + res, err := i.upper.Compare(sc, j.upper, collate.GetBinaryCollator()) if err != nil { sortError = err } if res != 0 { return res < 0 } - res, err = buckets[i].lower.Compare(sc, buckets[j].lower, collate.GetBinaryCollator()) + res, err = i.lower.Compare(sc, j.lower, collate.GetBinaryCollator()) if err != nil { sortError = err } @@ -2282,29 +2305,29 @@ const ( allEvicted ) -// ColLoadedStatus indicates the status of column statistics -type ColLoadedStatus struct { +// StatsLoadedStatus indicates the status of statistics +type StatsLoadedStatus struct { statsInitialized bool evictedStatus int } -// NewColFullLoadStatus returns the status that the column fully loaded -func NewColFullLoadStatus() ColLoadedStatus { - return ColLoadedStatus{ +// NewStatsFullLoadStatus returns the status that the column/index fully loaded +func NewStatsFullLoadStatus() StatsLoadedStatus { + return StatsLoadedStatus{ statsInitialized: true, evictedStatus: allLoaded, } } -// IsStatsInitialized indicates whether the column's statistics was loaded from storage before. +// IsStatsInitialized indicates whether the column/index's statistics was loaded from storage before. // Note that `IsStatsInitialized` only can be set in initializing -func (s ColLoadedStatus) IsStatsInitialized() bool { +func (s StatsLoadedStatus) IsStatsInitialized() bool { return s.statsInitialized } // IsLoadNeeded indicates whether it needs load statistics during LoadNeededHistograms or sync stats -// If the column was loaded and any statistics of it is evicting, it also needs re-load statistics. -func (s ColLoadedStatus) IsLoadNeeded() bool { +// If the column/index was loaded and any statistics of it is evicting, it also needs re-load statistics. +func (s StatsLoadedStatus) IsLoadNeeded() bool { if s.statsInitialized { return s.evictedStatus > allLoaded } @@ -2312,12 +2335,17 @@ func (s ColLoadedStatus) IsLoadNeeded() bool { } // IsEssentialStatsLoaded indicates whether the essential statistics is loaded. -// If the column was loaded, and at least histogram and topN still exists, the necessary statistics is still loaded. -func (s ColLoadedStatus) IsEssentialStatsLoaded() bool { +// If the column/index was loaded, and at least histogram and topN still exists, the necessary statistics is still loaded. 
+func (s StatsLoadedStatus) IsEssentialStatsLoaded() bool { return s.statsInitialized && (s.evictedStatus < allEvicted) } // IsCMSEvicted indicates whether the cms got evicted now. -func (s ColLoadedStatus) IsCMSEvicted() bool { +func (s StatsLoadedStatus) IsCMSEvicted() bool { return s.statsInitialized && s.evictedStatus >= onlyCmsEvicted } + +// IsFullLoad indicates whether the stats are full loaded +func (s StatsLoadedStatus) IsFullLoad() bool { + return s.statsInitialized && s.evictedStatus == allLoaded +} diff --git a/statistics/histogram_test.go b/statistics/histogram_test.go index 238cb2f1fbbc8..78c68d3a16dab 100644 --- a/statistics/histogram_test.go +++ b/statistics/histogram_test.go @@ -40,7 +40,7 @@ func TestNewHistogramBySelectivity(t *testing.T) { intCol := &Column{} intCol.Histogram = *NewHistogram(1, 30, 30, 0, types.NewFieldType(mysql.TypeLonglong), chunk.InitialCapacity, 0) intCol.IsHandle = true - intCol.ColLoadedStatus = NewColFullLoadStatus() + intCol.StatsLoadedStatus = NewStatsFullLoadStatus() for i := 0; i < 10; i++ { intCol.Bounds.AppendInt64(0, int64(i*3)) intCol.Bounds.AppendInt64(0, int64(i*3+2)) @@ -62,7 +62,7 @@ num: 1 lower_bound: 12 upper_bound: 14 repeats: 0 ndv: 0 num: 30 lower_bound: 27 upper_bound: 29 repeats: 0 ndv: 0` stringCol := &Column{} - stringCol.ColLoadedStatus = NewColFullLoadStatus() + stringCol.StatsLoadedStatus = NewStatsFullLoadStatus() stringCol.Histogram = *NewHistogram(2, 15, 30, 0, types.NewFieldType(mysql.TypeString), chunk.InitialCapacity, 0) stringCol.Bounds.AppendString(0, "a") stringCol.Bounds.AppendString(0, "aaaabbbb") diff --git a/statistics/selectivity.go b/statistics/selectivity.go index 74e513ab2f696..464b35875d3de 100644 --- a/statistics/selectivity.go +++ b/statistics/selectivity.go @@ -18,7 +18,6 @@ import ( "bytes" "math" "math/bits" - "sort" "github.com/pingcap/errors" "github.com/pingcap/tidb/expression" @@ -34,6 +33,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/tracing" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // If one condition can't be calculated, we will assume that the selectivity of this condition is 0.8. @@ -469,11 +469,11 @@ func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, ran // GetUsableSetsByGreedy will select the indices and pk used for calculate selectivity by greedy algorithm. 
func GetUsableSetsByGreedy(nodes []*StatsNode) (newBlocks []*StatsNode) { - sort.Slice(nodes, func(i int, j int) bool { - if r := compareType(nodes[i].Tp, nodes[j].Tp); r != 0 { + slices.SortFunc(nodes, func(i, j *StatsNode) bool { + if r := compareType(i.Tp, j.Tp); r != 0 { return r < 0 } - return nodes[i].ID < nodes[j].ID + return i.ID < j.ID }) marked := make([]bool, len(nodes)) mask := int64(math.MaxInt64) diff --git a/statistics/selectivity_test.go b/statistics/selectivity_test.go index 760eb2f96d5ea..675263bc241be 100644 --- a/statistics/selectivity_test.go +++ b/statistics/selectivity_test.go @@ -850,9 +850,9 @@ func prepareSelectivity(testKit *testkit.TestKit, dom *domain.Domain) (*statisti } for i := 1; i <= 5; i++ { statsTbl.Columns[int64(i)] = &statistics.Column{ - Histogram: *mockStatsHistogram(int64(i), colValues, 10, types.NewFieldType(mysql.TypeLonglong)), - Info: tbl.Columns[i-1], - ColLoadedStatus: statistics.NewColFullLoadStatus(), + Histogram: *mockStatsHistogram(int64(i), colValues, 10, types.NewFieldType(mysql.TypeLonglong)), + Info: tbl.Columns[i-1], + StatsLoadedStatus: statistics.NewStatsFullLoadStatus(), } } diff --git a/statistics/statistics_test.go b/statistics/statistics_test.go index 4ecabdfdd462d..bb35b094f3f25 100644 --- a/statistics/statistics_test.go +++ b/statistics/statistics_test.go @@ -251,10 +251,10 @@ func SubTestColumnRange() func(*testing.T) { hg.PreCalculateScalar() require.NoError(t, err) col := &Column{ - Histogram: *hg, - CMSketch: buildCMSketch(s.rc.(*recordSet).data), - Info: &model.ColumnInfo{}, - ColLoadedStatus: NewColFullLoadStatus(), + Histogram: *hg, + CMSketch: buildCMSketch(s.rc.(*recordSet).data), + Info: &model.ColumnInfo{}, + StatsLoadedStatus: NewStatsFullLoadStatus(), } tbl := &Table{ HistColl: HistColl{ @@ -327,7 +327,7 @@ func SubTestIntColumnRanges() func(*testing.T) { hg.PreCalculateScalar() require.NoError(t, err) require.Equal(t, int64(100000), rowCount) - col := &Column{Histogram: *hg, Info: &model.ColumnInfo{}, ColLoadedStatus: NewColFullLoadStatus()} + col := &Column{Histogram: *hg, Info: &model.ColumnInfo{}, StatsLoadedStatus: NewStatsFullLoadStatus()} tbl := &Table{ HistColl: HistColl{ Count: int64(col.TotalRowCount()), diff --git a/statistics/table.go b/statistics/table.go index 44a35bf7169b4..1adcdb01dcb96 100644 --- a/statistics/table.go +++ b/statistics/table.go @@ -17,7 +17,6 @@ package statistics import ( "fmt" "math" - "sort" "strings" "sync" @@ -39,6 +38,7 @@ import ( "github.com/pingcap/tidb/util/tracing" "go.uber.org/atomic" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -272,7 +272,7 @@ func (t *Table) String() string { for _, col := range t.Columns { cols = append(cols, col) } - sort.Slice(cols, func(i, j int) bool { return cols[i].ID < cols[j].ID }) + slices.SortFunc(cols, func(i, j *Column) bool { return i.ID < j.ID }) for _, col := range cols { strs = append(strs, col.String()) } @@ -280,7 +280,7 @@ func (t *Table) String() string { for _, idx := range t.Indices { idxs = append(idxs, idx) } - sort.Slice(idxs, func(i, j int) bool { return idxs[i].ID < idxs[j].ID }) + slices.SortFunc(idxs, func(i, j *Index) bool { return i.ID < j.ID }) for _, idx := range idxs { strs = append(strs, idx.String()) } @@ -351,42 +351,37 @@ func (t *Table) GetStatsHealthy() (int64, bool) { return healthy, true } -type tableColumnID struct { - TableID int64 - ColumnID int64 +type neededStatsMap struct { + m sync.RWMutex + items map[model.TableItemID]struct{} } -type neededColumnMap struct { - m sync.RWMutex - cols 
map[tableColumnID]struct{} -} - -func (n *neededColumnMap) AllCols() []tableColumnID { +func (n *neededStatsMap) AllItems() []model.TableItemID { n.m.RLock() - keys := make([]tableColumnID, 0, len(n.cols)) - for key := range n.cols { + keys := make([]model.TableItemID, 0, len(n.items)) + for key := range n.items { keys = append(keys, key) } n.m.RUnlock() return keys } -func (n *neededColumnMap) insert(col tableColumnID) { +func (n *neededStatsMap) insert(col model.TableItemID) { n.m.Lock() - n.cols[col] = struct{}{} + n.items[col] = struct{}{} n.m.Unlock() } -func (n *neededColumnMap) Delete(col tableColumnID) { +func (n *neededStatsMap) Delete(col model.TableItemID) { n.m.Lock() - delete(n.cols, col) + delete(n.items, col) n.m.Unlock() } -func (n *neededColumnMap) Length() int { +func (n *neededStatsMap) Length() int { n.m.RLock() defer n.m.RUnlock() - return len(n.cols) + return len(n.items) } // RatioOfPseudoEstimate means if modifyCount / statsTblCount is greater than this ratio, we think the stats is invalid @@ -898,8 +893,9 @@ func PseudoTable(tblInfo *model.TableInfo) *Table { for _, idx := range tblInfo.Indices { if idx.State == model.StatePublic { t.Indices[idx.ID] = &Index{ - Info: idx, - Histogram: *NewHistogram(idx.ID, 0, 0, 0, types.NewFieldType(mysql.TypeBlob), 0, 0)} + PhysicalID: fakePhysicalID, + Info: idx, + Histogram: *NewHistogram(idx.ID, 0, 0, 0, types.NewFieldType(mysql.TypeBlob), 0, 0)} } } return t diff --git a/store/copr/BUILD.bazel b/store/copr/BUILD.bazel index 575fef1d168de..f6cde5564e9f6 100644 --- a/store/copr/BUILD.bazel +++ b/store/copr/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "@com_github_tikv_client_go_v2//util", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//status", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index 32f38a72f7576..859011771106c 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "math" - "sort" "strconv" "sync" "sync/atomic" @@ -40,6 +39,7 @@ import ( "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/tikvrpc" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // batchCopTask comprises of multiple copTask that will send to same store. @@ -208,13 +208,13 @@ func balanceBatchCopTaskWithContinuity(storeTaskMap map[uint64]*batchCopTask, ca storeTasks := deepCopyStoreTaskMap(storeTaskMap) // Sort regions by their key ranges. - sort.Slice(candidateRegionInfos, func(i, j int) bool { + slices.SortFunc(candidateRegionInfos, func(i, j RegionInfo) bool { // Special case: Sort empty ranges to the end. 
- if candidateRegionInfos[i].Ranges.Len() < 1 || candidateRegionInfos[j].Ranges.Len() < 1 { - return candidateRegionInfos[i].Ranges.Len() > candidateRegionInfos[j].Ranges.Len() + if i.Ranges.Len() < 1 || j.Ranges.Len() < 1 { + return i.Ranges.Len() > j.Ranges.Len() } // StartKey0 < StartKey1 - return bytes.Compare(candidateRegionInfos[i].Ranges.At(0).StartKey, candidateRegionInfos[j].Ranges.At(0).StartKey) == -1 + return bytes.Compare(i.Ranges.At(0).StartKey, j.Ranges.At(0).StartKey) == -1 }) balanceStart := time.Now() diff --git a/store/copr/coprocessor.go b/store/copr/coprocessor.go index 0b217583bb9b3..29cc437b182e6 100644 --- a/store/copr/coprocessor.go +++ b/store/copr/coprocessor.go @@ -93,6 +93,7 @@ func (c *CopClient) Send(ctx context.Context, req *kv.Request, variables interfa req.Paging = false } ctx = context.WithValue(ctx, tikv.TxnStartKey(), req.StartTs) + ctx = context.WithValue(ctx, util.RequestSourceKey, req.RequestSource) bo := backoff.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars) ranges := NewKeyRanges(req.KeyRanges) tasks, err := buildCopTasks(bo, c.store.GetRegionCache(), ranges, req, eventCb) @@ -165,6 +166,7 @@ type copTask struct { pagingSize uint64 partitionIndex int64 // used by balanceBatchCopTask in PartitionTableScan + requestSource util.RequestSource } func (r *copTask) String() string { @@ -212,15 +214,16 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv pagingSize = paging.MinPagingSize } tasks = append(tasks, &copTask{ - region: loc.Location.Region, - bucketsVer: loc.getBucketVersion(), - ranges: loc.Ranges.Slice(i, nextI), - respChan: make(chan *copResponse, chanSize), - cmdType: cmdType, - storeType: req.StoreType, - eventCb: eventCb, - paging: req.Paging, - pagingSize: pagingSize, + region: loc.Location.Region, + bucketsVer: loc.getBucketVersion(), + ranges: loc.Ranges.Slice(i, nextI), + respChan: make(chan *copResponse, chanSize), + cmdType: cmdType, + storeType: req.StoreType, + eventCb: eventCb, + paging: req.Paging, + pagingSize: pagingSize, + requestSource: req.RequestSource, }) i = nextI } @@ -712,15 +715,15 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch var cacheKey []byte var cacheValue *coprCacheValue - // TODO: cache paging copr // If there are many ranges, it is very likely to be a TableLookupRequest. They are not worth to cache since // computing is not the main cost. Ignore such requests directly to avoid slowly building the cache key. 
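The coprocessor changes around this hunk also thread a request-source tag from the kv.Request into every copTask and then onto the per-RPC context, so TiKV can attribute the traffic. The sketch below only illustrates that plumbing with simplified stand-in types; the real code uses kv.Request, copTask and kvrpcpb.Context from the TiDB and client-go packages:

```go
package main

import "fmt"

// Simplified stand-ins for kv.Request, copTask and the per-RPC context.
type request struct {
	StartTS       uint64
	RequestSource string
}

type copTask struct {
	startTS       uint64
	requestSource string
}

type rpcContext struct {
	TaskID        uint64
	RequestSource string
}

// buildCopTasks copies the source tag onto every task it creates, mirroring
// how the patch adds a requestSource field to copTask.
func buildCopTasks(req *request, n int) []*copTask {
	tasks := make([]*copTask, 0, n)
	for i := 0; i < n; i++ {
		tasks = append(tasks, &copTask{
			startTS:       req.StartTS,
			requestSource: req.RequestSource,
		})
	}
	return tasks
}

func main() {
	req := &request{StartTS: 42, RequestSource: "internal_gc"}
	for i, t := range buildCopTasks(req, 2) {
		// When the RPC is issued, the tag travels on the per-request context.
		rpcCtx := rpcContext{TaskID: uint64(i), RequestSource: t.requestSource}
		fmt.Printf("%+v\n", rpcCtx)
	}
}
```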
- if task.cmdType == tikvrpc.CmdCop && !task.paging && worker.store.coprCache != nil && worker.req.Cacheable && worker.store.coprCache.CheckRequestAdmission(len(copReq.Ranges)) { + if task.cmdType == tikvrpc.CmdCop && worker.store.coprCache != nil && worker.req.Cacheable && worker.store.coprCache.CheckRequestAdmission(len(copReq.Ranges)) { cKey, err := coprCacheBuildKey(&copReq) if err == nil { cacheKey = cKey cValue := worker.store.coprCache.Get(cKey) copReq.IsCacheEnabled = true + if cValue != nil && cValue.RegionID == task.region.GetID() && cValue.TimeStamp <= worker.req.StartTs { // Append cache version to the request to skip Coprocessor computation if possible // when request result is cached @@ -741,6 +744,7 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch RecordTimeStat: true, RecordScanStat: true, TaskId: worker.req.TaskID, + RequestSource: task.requestSource.GetRequestSource(), }) if worker.req.ResourceGroupTagger != nil { worker.req.ResourceGroupTagger(req) @@ -779,7 +783,7 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch metrics.TiKVCoprocessorHistogram.WithLabelValues(storeID, strconv.FormatBool(staleRead)).Observe(costTime.Seconds()) if worker.req.Paging { - return worker.handleCopPagingResult(bo, rpcCtx, &copResponse{pbResp: resp.Resp.(*coprocessor.Response)}, task, ch, costTime) + return worker.handleCopPagingResult(bo, rpcCtx, &copResponse{pbResp: resp.Resp.(*coprocessor.Response)}, cacheKey, cacheValue, task, ch, costTime) } // Handles the response for non-paging copTask. @@ -848,8 +852,8 @@ func appendScanDetail(logStr string, columnFamily string, scanInfo *kvrpcpb.Scan return logStr } -func (worker *copIteratorWorker) handleCopPagingResult(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) { - remainedTasks, err := worker.handleCopResponse(bo, rpcCtx, resp, nil, nil, task, ch, nil, costTime) +func (worker *copIteratorWorker) handleCopPagingResult(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, cacheKey []byte, cacheValue *coprCacheValue, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) { + remainedTasks, err := worker.handleCopResponse(bo, rpcCtx, resp, cacheKey, cacheValue, task, ch, nil, costTime) if err != nil || len(remainedTasks) != 0 { // If there is region error or lock error, keep the paging size and retry. for _, remainedTask := range remainedTasks { @@ -895,7 +899,9 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R // We may meet RegionError at the first packet, but not during visiting the stream. return buildCopTasks(bo, worker.store.GetRegionCache(), task.ranges, worker.req, task.eventCb) } + var resolveLockDetail *util.ResolveLockDetail if lockErr := resp.pbResp.GetLocked(); lockErr != nil { + resolveLockDetail = worker.getLockResolverDetails() // Be care that we didn't redact the SQL statement because the log is DEBUG level. 
if task.eventCb != nil { task.eventCb(trxevents.WrapCopMeetLock(&trxevents.CopMeetLock{ @@ -905,11 +911,17 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R logutil.Logger(bo.GetCtx()).Debug("coprocessor encounters lock", zap.Stringer("lock", lockErr)) } - msBeforeExpired, err1 := worker.kvclient.ResolveLocks(bo.TiKVBackoffer(), worker.req.StartTs, []*txnlock.Lock{txnlock.NewLock(lockErr)}) + resolveLocksOpts := txnlock.ResolveLocksOptions{ + CallerStartTS: worker.req.StartTs, + Locks: []*txnlock.Lock{txnlock.NewLock(lockErr)}, + Detail: resolveLockDetail, + } + resolveLocksRes, err1 := worker.kvclient.ResolveLocksWithOpts(bo.TiKVBackoffer(), resolveLocksOpts) err1 = derr.ToTiDBErr(err1) if err1 != nil { return nil, errors.Trace(err1) } + msBeforeExpired := resolveLocksRes.TTL if msBeforeExpired > 0 { if err := bo.BackoffWithMaxSleepTxnLockFast(int(msBeforeExpired), errors.New(lockErr.String())); err != nil { return nil, errors.Trace(err) @@ -934,7 +946,7 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R zap.String("storeAddr", task.storeAddr), zap.Error(err)) if strings.Contains(err.Error(), "write conflict") { - return nil, kv.ErrWriteConflict + return nil, kv.ErrWriteConflict.FastGen("%s", otherErr) } return nil, errors.Trace(err) } @@ -944,7 +956,7 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R } else if task.ranges != nil && task.ranges.Len() > 0 { resp.startKey = task.ranges.At(0).StartKey } - worker.handleCollectExecutionInfo(bo, rpcCtx, resp) + worker.handleCollectExecutionInfo(bo, rpcCtx, resp, resolveLockDetail) resp.respTime = costTime if resp.pbResp.IsCacheHit { if cacheValue == nil { @@ -954,21 +966,49 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R data := make([]byte, len(cacheValue.Data)) copy(data, cacheValue.Data) resp.pbResp.Data = data + if worker.req.Paging { + var start, end []byte + if cacheValue.PageStart != nil { + start = make([]byte, len(cacheValue.PageStart)) + copy(start, cacheValue.PageStart) + } + if cacheValue.PageEnd != nil { + end = make([]byte, len(cacheValue.PageEnd)) + copy(end, cacheValue.PageEnd) + } + // When paging protocol is used, the response key range is part of the cache data. + if start != nil || end != nil { + resp.pbResp.Range = &coprocessor.KeyRange{ + Start: start, + End: end, + } + } else { + resp.pbResp.Range = nil + } + } resp.detail.CoprCacheHit = true } else { // Cache not hit or cache hit but not valid: update the cache if the response can be cached. if cacheKey != nil && resp.pbResp.CanBeCached && resp.pbResp.CacheLastVersion > 0 { - if worker.store.coprCache.CheckResponseAdmission(resp.pbResp.Data.Size(), resp.detail.TimeDetail.ProcessTime) { - data := make([]byte, len(resp.pbResp.Data)) - copy(data, resp.pbResp.Data) - - newCacheValue := coprCacheValue{ - Data: data, - TimeStamp: worker.req.StartTs, - RegionID: task.region.GetID(), - RegionDataVersion: resp.pbResp.CacheLastVersion, + if resp.detail != nil { + if worker.store.coprCache.CheckResponseAdmission(resp.pbResp.Data.Size(), resp.detail.TimeDetail.ProcessTime) { + data := make([]byte, len(resp.pbResp.Data)) + copy(data, resp.pbResp.Data) + + newCacheValue := coprCacheValue{ + Data: data, + TimeStamp: worker.req.StartTs, + RegionID: task.region.GetID(), + RegionDataVersion: resp.pbResp.CacheLastVersion, + } + // When paging protocol is used, the response key range is part of the cache data. 
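With the `!task.paging` guard removed above, paging responses become cacheable too, so the cached value has to remember the key range each page covered and restore it on a cache hit. A simplified stand-in for that part of coprCacheValue; the real type and the kvproto response are more involved:

```go
package main

import "fmt"

// pagedValue is a simplified stand-in for the patch's coprCacheValue: besides
// the response data it remembers the key range the paging response covered.
type pagedValue struct {
	Data      []byte
	PageStart []byte
	PageEnd   []byte
}

// restoreRange rebuilds the response range from the cached copy, as the cache
// hit path does when the request uses the paging protocol. It reports false
// when no range was cached, so the response keeps an empty range.
func (v *pagedValue) restoreRange() (start, end []byte, ok bool) {
	if v.PageStart == nil && v.PageEnd == nil {
		return nil, nil, false
	}
	start = append([]byte{}, v.PageStart...)
	end = append([]byte{}, v.PageEnd...)
	return start, end, true
}

func main() {
	v := &pagedValue{Data: []byte("rows"), PageStart: []byte("a"), PageEnd: []byte("m")}
	if s, e, ok := v.restoreRange(); ok {
		fmt.Printf("cached page covers [%s, %s)\n", s, e)
	}
}
```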
+ if r := resp.pbResp.GetRange(); r != nil { + newCacheValue.PageStart = append([]byte{}, r.GetStart()...) + newCacheValue.PageEnd = append([]byte{}, r.GetEnd()...) + } + + worker.store.coprCache.Set(cacheKey, &newCacheValue) } - worker.store.coprCache.Set(cacheKey, &newCacheValue) } } } @@ -976,7 +1016,14 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *tikv.R return nil, nil } -func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse) { +func (worker *copIteratorWorker) getLockResolverDetails() *util.ResolveLockDetail { + if !worker.enableCollectExecutionInfo { + return nil + } + return &util.ResolveLockDetail{} +} + +func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, resolveLockDetail *util.ResolveLockDetail) { defer func() { worker.kvclient.Stats = nil }() @@ -1004,6 +1051,9 @@ func (worker *copIteratorWorker) handleCollectExecutionInfo(bo *Backoffer, rpcCt resp.detail.CalleeAddress = rpcCtx.Addr } sd := &util.ScanDetail{} + if resolveLockDetail != nil { + sd.ResolveLock = resolveLockDetail + } td := util.TimeDetail{} if pbDetails := resp.pbResp.ExecDetailsV2; pbDetails != nil { // Take values in `ExecDetailsV2` first. diff --git a/store/copr/coprocessor_cache.go b/store/copr/coprocessor_cache.go index 35987347fb0f2..1adef9915cb48 100644 --- a/store/copr/coprocessor_cache.go +++ b/store/copr/coprocessor_cache.go @@ -41,6 +41,10 @@ type coprCacheValue struct { TimeStamp uint64 RegionID uint64 RegionDataVersion uint64 + + // Used in coprocessor paging protocol + PageStart []byte + PageEnd []byte } func (v *coprCacheValue) String() string { @@ -54,7 +58,7 @@ func (v *coprCacheValue) String() string { const coprCacheValueSize = int(unsafe.Sizeof(coprCacheValue{})) func (v *coprCacheValue) Len() int { - return coprCacheValueSize + len(v.Key) + len(v.Data) + return coprCacheValueSize + len(v.Key) + len(v.Data) + len(v.PageStart) + len(v.PageEnd) } func newCoprCache(config *config.CoprocessorCache) (*coprCache, error) { @@ -108,6 +112,9 @@ func coprCacheBuildKey(copReq *coprocessor.Request) ([]byte, error) { } totalLength += 2 + len(r.Start) + 2 + len(r.End) } + if copReq.PagingSize > 0 { + totalLength += 1 + } key := make([]byte, totalLength) @@ -141,6 +148,11 @@ func coprCacheBuildKey(copReq *coprocessor.Request) ([]byte, error) { dest += len(r.End) } + // 1 byte when use paging protocol + if copReq.PagingSize > 0 { + key[dest] = 1 + } + return key, nil } diff --git a/store/copr/coprocessor_cache_test.go b/store/copr/coprocessor_cache_test.go index 8c68888ee61a8..91906c980d0f8 100644 --- a/store/copr/coprocessor_cache_test.go +++ b/store/copr/coprocessor_cache_test.go @@ -155,8 +155,8 @@ func TestCacheValueLen(t *testing.T) { RegionID: 0x1, RegionDataVersion: 0x3, } - // 72 = (8 byte pointer + 8 byte for length + 8 byte for cap) * 2 + 8 byte * 3 - require.Equal(t, 72, v.Len()) + // 120 = (8 byte pointer + 8 byte for length + 8 byte for cap) * 4 + 8 byte * 3 + require.Equal(t, 120, v.Len()) v = coprCacheValue{ Key: []byte("foobar"), @@ -165,7 +165,17 @@ func TestCacheValueLen(t *testing.T) { RegionID: 0x1, RegionDataVersion: 0x3, } - require.Equal(t, 72+len(v.Key)+len(v.Data), v.Len()) + require.Equal(t, 120+len(v.Key)+len(v.Data), v.Len()) + + v = coprCacheValue{ + Key: []byte("foobar"), + Data: []byte("12345678"), + TimeStamp: 0x123, + RegionID: 0x1, + RegionDataVersion: 0x3, + PageEnd: []byte("3235"), + } + require.Equal(t, 
120+len(v.Key)+len(v.Data)+len(v.PageEnd), v.Len()) } func TestGetSet(t *testing.T) { diff --git a/store/driver/error/error.go b/store/driver/error/error.go index 6b7b444239d9c..1d9543cc1437d 100644 --- a/store/driver/error/error.go +++ b/store/driver/error/error.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package error //nolint:predeclared +package error //nolint: predeclared import ( stderrs "errors" diff --git a/store/driver/error/error_test.go b/store/driver/error/error_test.go index a8c2c6ddc9152..dde341e8da4f1 100644 --- a/store/driver/error/error_test.go +++ b/store/driver/error/error_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package error //nolint:predeclared +package error //nolint: predeclared import ( "testing" diff --git a/store/driver/txn/snapshot.go b/store/driver/txn/snapshot.go index 7ecfae6a57e81..9b39e635b1d71 100644 --- a/store/driver/txn/snapshot.go +++ b/store/driver/txn/snapshot.go @@ -123,6 +123,10 @@ func (s *tikvSnapshot) SetOption(opt int, val interface{}) { s.interceptor = val.(kv.SnapshotInterceptor) case kv.RPCInterceptor: s.KVSnapshot.SetRPCInterceptor(val.(interceptor.RPCInterceptor)) + case kv.RequestSourceInternal: + s.KVSnapshot.SetRequestSourceInternal(val.(bool)) + case kv.RequestSourceType: + s.KVSnapshot.SetRequestSourceType(val.(string)) } } diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go index 4cdd45a6f1f92..29baf3c48c06f 100644 --- a/store/driver/txn/txn_driver.go +++ b/store/driver/txn/txn_driver.go @@ -251,6 +251,10 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.KVTxn.SetAssertionLevel(val.(kvrpcpb.AssertionLevel)) case kv.TableToColumnMaps: txn.columnMapsCache = val + case kv.RequestSourceInternal: + txn.KVTxn.SetRequestSourceInternal(val.(bool)) + case kv.RequestSourceType: + txn.KVTxn.SetRequestSourceType(val.(string)) } } @@ -262,6 +266,8 @@ func (txn *tikvTxn) GetOption(opt int) interface{} { return txn.KVTxn.GetScope() case kv.TableToColumnMaps: return txn.columnMapsCache + case kv.RequestSourceType: + return txn.RequestSourceType default: return nil } diff --git a/store/gcworker/BUILD.bazel b/store/gcworker/BUILD.bazel index c4be6351dc959..9ac77dd4ceb30 100644 --- a/store/gcworker/BUILD.bazel +++ b/store/gcworker/BUILD.bazel @@ -36,6 +36,7 @@ go_library( "@com_github_tikv_client_go_v2//txnkv/txnlock", "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) @@ -54,8 +55,8 @@ go_test( "//domain/infosync", "//kv", "//parser/model", + "//session", "//store/mockstore", - "//testkit", "//testkit/testmain", "//testkit/testsetup", "@com_github_pingcap_errors//:errors", diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index da73f28053618..e07536e599795 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -23,7 +23,6 @@ import ( "fmt" "math" "os" - "sort" "strconv" "strings" "sync" @@ -60,6 +59,7 @@ import ( tikvutil "github.com/tikv/client-go/v2/util" pd "github.com/tikv/pd/client" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // GCWorker periodically triggers GC process on tikv server. 
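The expected empty-value size in TestCacheValueLen moves from 72 to 120 because the cached value gains two new byte slices, PageStart and PageEnd: on a 64-bit platform each slice header is 24 bytes (pointer, length, capacity), so the struct grows by 48 bytes. A quick check of that arithmetic with stand-in structs:

```go
package main

import (
	"fmt"
	"unsafe"
)

// valueOld mirrors the previous coprCacheValue layout: two slices plus three
// uint64 fields, i.e. 2*24 + 3*8 = 72 bytes on a 64-bit platform.
type valueOld struct {
	Key               []byte
	Data              []byte
	TimeStamp         uint64
	RegionID          uint64
	RegionDataVersion uint64
}

// valueNew adds the two paging range slices, i.e. another 2*24 = 48 bytes.
type valueNew struct {
	valueOld
	PageStart []byte
	PageEnd   []byte
}

func main() {
	fmt.Println(unsafe.Sizeof(valueOld{}), unsafe.Sizeof(valueNew{})) // 72 120
}
```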
@@ -74,8 +74,9 @@ type GCWorker struct { cancel context.CancelFunc done chan error testingKnobs struct { - scanLocks func(key []byte, regionID uint64) []*txnlock.Lock - resolveLocks func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) + scanLocks func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock + batchResolveLocks func(locks []*txnlock.Lock, regionID tikv.RegionVerID, safepoint uint64) (ok bool, err error) + resolveLocks func(locks []*txnlock.Lock, lowResolutionTS uint64) (int64, error) } } @@ -111,6 +112,7 @@ func NewGCWorker(store kv.Storage, pdClient pd.Client) (*GCWorker, error) { func (w *GCWorker) Start() { var ctx context.Context ctx, w.cancel = context.WithCancel(context.Background()) + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnGC) var wg sync.WaitGroup wg.Add(1) go w.start(ctx, &wg) @@ -146,6 +148,9 @@ const ( gcDefaultConcurrency = 2 gcMinConcurrency = 1 gcMaxConcurrency = 128 + + gcTryResolveLocksIntervalFromNow = time.Minute * 5 + // We don't want gc to sweep out the cached info belong to other processes, like coprocessor. gcScanLockLimit = txnlock.ResolvedCacheSize / 2 @@ -276,7 +281,7 @@ func (w *GCWorker) Stats(vars *variable.SessionVars) (map[string]interface{}, er } func (w *GCWorker) tick(ctx context.Context) { - isLeader, err := w.checkLeader() + isLeader, err := w.checkLeader(ctx) if err != nil { logutil.Logger(ctx).Warn("[gc worker] check leader", zap.Error(err)) metrics.GCJobFailureCounter.WithLabelValues("check_leader").Inc() @@ -302,7 +307,7 @@ func (w *GCWorker) leaderTick(ctx context.Context) error { return nil } - ok, safePoint, err := w.prepare() + ok, safePoint, err := w.prepare(ctx) if err != nil || !ok { if err != nil { metrics.GCJobFailureCounter.WithLabelValues("prepare").Inc() @@ -338,14 +343,13 @@ func (w *GCWorker) leaderTick(ctx context.Context) error { // prepare checks preconditions for starting a GC job. It returns a bool // that indicates whether the GC job should start and the new safePoint. -func (w *GCWorker) prepare() (bool, uint64, error) { +func (w *GCWorker) prepare(ctx context.Context) (bool, uint64, error) { // Add a transaction here is to prevent following situations: // 1. GC check gcEnable is true, continue to do GC // 2. The user sets gcEnable to false // 3. The user gets `tikv_gc_safe_point` value is t1, then the user thinks the data after time t1 won't be clean by GC. // 4. GC update `tikv_gc_safe_point` value to t2, continue do GC in this round. // Then the data record that has been dropped between time t1 and t2, will be cleaned by GC, but the user thinks the data after t1 won't be clean by GC. 
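The GC worker now runs with a context tagged as internal GC traffic (kv.WithInternalSourceType with kv.InternalTxnGC), so the internal SQL and transactions it issues carry a request source. The real helpers live in TiDB's kv package; the sketch below only illustrates the idea with a hand-rolled context key:

```go
package main

import (
	"context"
	"fmt"
)

// sourceKey is a stand-in for the typed key TiDB's kv package uses; this is
// not the real kv.WithInternalSourceType implementation, only the shape of it.
type sourceKey struct{}

func withInternalSource(ctx context.Context, source string) context.Context {
	return context.WithValue(ctx, sourceKey{}, source)
}

func requestSource(ctx context.Context) string {
	if s, ok := ctx.Value(sourceKey{}).(string); ok {
		return s
	}
	return "unknown"
}

func main() {
	ctx := withInternalSource(context.Background(), "gc_worker")
	// Every internal statement the worker runs is executed with this ctx, so
	// the source tag reaches the storage layer alongside the request.
	fmt.Println(requestSource(ctx))
}
```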
- ctx := context.Background() se := createSession(w.store) defer se.Close() _, err := se.ExecuteInternal(ctx, "BEGIN") @@ -693,7 +697,7 @@ func (w *GCWorker) deleteRanges(ctx context.Context, safePoint uint64, concurren se := createSession(w.store) defer se.Close() - ranges, err := util.LoadDeleteRanges(se, safePoint) + ranges, err := util.LoadDeleteRanges(ctx, se, safePoint) if err != nil { return errors.Trace(err) } @@ -765,7 +769,7 @@ func (w *GCWorker) redoDeleteRanges(ctx context.Context, safePoint uint64, concu redoDeleteRangesTs := safePoint - oracle.ComposeTS(int64(gcRedoDeleteRangeDelay.Seconds())*1000, 0) se := createSession(w.store) - ranges, err := util.LoadDoneDeleteRanges(se, redoDeleteRangesTs) + ranges, err := util.LoadDoneDeleteRanges(ctx, se, redoDeleteRangesTs) se.Close() if err != nil { return errors.Trace(err) @@ -1014,12 +1018,26 @@ func (w *GCWorker) checkUsePhysicalScanLock() (bool, error) { } func (w *GCWorker) resolveLocks(ctx context.Context, safePoint uint64, concurrency int, usePhysical bool) (bool, error) { + // tryResolveLocksTS is defined as `now() - gcTryResolveLocksIntervalFromNow`, + // it used for trying resolve locks, ts of which is smaller than tryResolveLocksTS and expired. + tryResolveLocksTS, err := w.getTryResolveLocksTS() + if err != nil { + return false, err + } + + if tryResolveLocksTS < safePoint { + tryResolveLocksTS = safePoint + } else { + // to do: add a switch for tryResolveLocksTS. + // if the config log-backup.enable is false in PiTR, set safePoint to tryResolveLocksTS directly. + } + if !usePhysical { - return false, w.legacyResolveLocks(ctx, safePoint, concurrency) + return false, w.legacyResolveLocks(ctx, safePoint, tryResolveLocksTS, concurrency) } // First try resolve locks with physical scan - err := w.resolveLocksPhysical(ctx, safePoint) + err = w.resolveLocksPhysical(ctx, safePoint) if err == nil { return true, nil } @@ -1027,21 +1045,28 @@ func (w *GCWorker) resolveLocks(ctx context.Context, safePoint uint64, concurren logutil.Logger(ctx).Error("[gc worker] resolve locks with physical scan failed, trying fallback to legacy resolve lock", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Error(err)) - return false, w.legacyResolveLocks(ctx, safePoint, concurrency) + return false, w.legacyResolveLocks(ctx, safePoint, tryResolveLocksTS, concurrency) } -func (w *GCWorker) legacyResolveLocks(ctx context.Context, safePoint uint64, concurrency int) error { +func (w *GCWorker) legacyResolveLocks( + ctx context.Context, + safePoint uint64, + tryResolveLocksTS uint64, + concurrency int, +) error { metrics.GCWorkerCounter.WithLabelValues("resolve_locks").Inc() logutil.Logger(ctx).Info("[gc worker] start resolve locks", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Int("concurrency", concurrency)) startTime := time.Now() handler := func(ctx context.Context, r tikvstore.KeyRange) (rangetask.TaskStat, error) { - return w.resolveLocksForRange(ctx, safePoint, r.StartKey, r.EndKey) + return w.resolveLocksForRange(ctx, safePoint, tryResolveLocksTS, r.StartKey, r.EndKey) } runner := rangetask.NewRangeTaskRunner("resolve-locks-runner", w.tikvStore, concurrency, handler) @@ -1058,18 +1083,93 @@ func (w *GCWorker) legacyResolveLocks(ctx context.Context, safePoint uint64, con logutil.Logger(ctx).Info("[gc worker] finish resolve locks", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + 
zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Int("regions", runner.CompletedRegions())) metrics.GCHistogram.WithLabelValues("resolve_locks").Observe(time.Since(startTime).Seconds()) return nil } -func (w *GCWorker) resolveLocksForRange(ctx context.Context, safePoint uint64, startKey []byte, endKey []byte) (rangetask.TaskStat, error) { +// getTryResolveLocksTS gets the TryResolveLocksTS +// that is defined as `now() - gcTryResolveLocksIntervalFromNow`. +func (w *GCWorker) getTryResolveLocksTS() (uint64, error) { + now, err := w.tikvStore.CurrentTimestamp(kv.GlobalTxnScope) + if err != nil { + return 0, err + } + + gcTryResolveLockTS := oracle.ComposeTS(oracle.ExtractPhysical(now)-gcTryResolveLocksIntervalFromNow.Milliseconds(), oracle.ExtractLogical(now)) + return gcTryResolveLockTS, nil +} + +// batchResolveExpiredLocks tries to resolve expired locks with batch method. +// Travesal the given locks and check that: +// 1. If the ts of lock is equal with or smaller than forceResolveLocksTS(acually equals safepoint), +// it will rollback the txn, no matter the lock is expired of not. +// 2. If the ts of lock is larger than forceResolveLocksTS, it will check status of the txn. +// Resolve the lock if txn is expired, Or do nothing. +func (w *GCWorker) batchResolveExpiredLocks( + bo *tikv.Backoffer, + locks []*txnlock.Lock, + loc tikv.RegionVerID, + forceResolveLocksTS uint64, + tryResolveLocksTS uint64, +) (bool, error) { + if len(locks) == 0 { + return true, nil + } + + forceResolveLocks := make([]*txnlock.Lock, 0, len(locks)) + tryResolveLocks := make([]*txnlock.Lock, 0, len(locks)) + for _, l := range locks { + if l.TxnID <= forceResolveLocksTS { + forceResolveLocks = append(forceResolveLocks, l) + } else { + tryResolveLocks = append(tryResolveLocks, l) + } + } + + logutil.BgLogger().Debug("batchResolveExpiredLocks", + zap.Uint64("force-resolve-locks-ts", forceResolveLocksTS), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), + zap.Int("force-resolve-locks-count", len(forceResolveLocks)), + zap.Int("try-resolve-locks-count", len(tryResolveLocks))) + + var ( + ok bool + err error + ) + if w.testingKnobs.batchResolveLocks != nil { + ok, err = w.testingKnobs.batchResolveLocks(forceResolveLocks, loc, forceResolveLocksTS) + } else { + ok, err = w.tikvStore.GetLockResolver().BatchResolveLocks(bo, forceResolveLocks, loc) + } + if err != nil || !ok { + return ok, err + } + + if w.testingKnobs.resolveLocks != nil { + _, err = w.testingKnobs.resolveLocks(tryResolveLocks, tryResolveLocksTS) + } else { + _, err = w.tikvStore.GetLockResolver().ResolveLocks(bo, 0, tryResolveLocks) + } + return err == nil, errors.Trace(err) +} + +func (w *GCWorker) resolveLocksForRange( + ctx context.Context, + forceResolveLocksTS uint64, + tryResolveLocksTS uint64, + startKey []byte, + endKey []byte, +) (rangetask.TaskStat, error) { // for scan lock request, we must return all locks even if they are generated // by the same transaction. because gc worker need to make sure all locks have been // cleaned. req := tikvrpc.NewRequest(tikvrpc.CmdScanLock, &kvrpcpb.ScanLockRequest{ - MaxVersion: safePoint, + MaxVersion: tryResolveLocksTS, Limit: gcScanLockLimit, + }, kvrpcpb.Context{ + RequestSource: tikvutil.RequestSourceFromCtx(ctx), }) failpoint.Inject("lowScanLockLimit", func() { @@ -1127,19 +1227,11 @@ retryScanAndResolve: locks = append(locks, txnlock.NewLock(li)) } if w.testingKnobs.scanLocks != nil { - locks = append(locks, w.testingKnobs.scanLocks(key, loc.Region.GetID())...) 
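getTryResolveLocksTS and batchResolveExpiredLocks introduced here split lock resolution into two classes: everything at or below the safepoint is force-resolved, while younger locks (scanned up to now minus gcTryResolveLocksIntervalFromNow) are only resolved if expired. A rough sketch of the timestamp math and the partition, with the current TSO passed in rather than fetched from PD:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tikv/client-go/v2/oracle"
)

// tryResolveLocksTS returns now minus interval as a TSO, clamped to safePoint,
// mirroring getTryResolveLocksTS plus the clamp done by resolveLocks.
func tryResolveLocksTS(now, safePoint uint64, interval time.Duration) uint64 {
	ts := oracle.ComposeTS(oracle.ExtractPhysical(now)-interval.Milliseconds(), oracle.ExtractLogical(now))
	if ts < safePoint {
		return safePoint
	}
	return ts
}

// partitionLocks splits scanned locks the way batchResolveExpiredLocks does:
// transactions started at or before the safepoint are force-resolved, the
// rest are only resolved once their TTL has expired.
func partitionLocks(txnIDs []uint64, safePoint uint64) (force, try []uint64) {
	for _, id := range txnIDs {
		if id <= safePoint {
			force = append(force, id)
		} else {
			try = append(try, id)
		}
	}
	return force, try
}

func main() {
	now := oracle.ComposeTS(time.Now().UnixMilli(), 0)
	safePoint := oracle.ComposeTS(time.Now().Add(-time.Hour).UnixMilli(), 0)

	ts := tryResolveLocksTS(now, safePoint, 5*time.Minute)
	fmt.Println("scan locks up to:", oracle.GetTimeFromTS(ts))

	force, try := partitionLocks([]uint64{safePoint - 1, ts + 1}, safePoint)
	fmt.Println("force-resolve:", len(force), "try-resolve:", len(try))
}
```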
+ locks = append(locks, w.testingKnobs.scanLocks(key, loc.Region.GetID(), tryResolveLocksTS)...) } locForResolve := loc for { - var ( - ok bool - err1 error - ) - if w.testingKnobs.resolveLocks != nil { - ok, err1 = w.testingKnobs.resolveLocks(locks, locForResolve.Region) - } else { - ok, err1 = w.tikvStore.GetLockResolver().BatchResolveLocks(bo, locks, locForResolve.Region) - } + ok, err1 := w.batchResolveExpiredLocks(bo, locks, locForResolve.Region, forceResolveLocksTS, tryResolveLocksTS) if err1 != nil { return stat, errors.Trace(err1) } @@ -1361,8 +1453,8 @@ func (w *GCWorker) checkLockObservers(ctx context.Context, safePoint uint64, sto for i, lockInfo := range respInner.Locks { locks[i] = txnlock.NewLock(lockInfo) } - sort.Slice(locks, func(i, j int) bool { - return bytes.Compare(locks[i].Key, locks[j].Key) < 0 + slices.SortFunc(locks, func(i, j *txnlock.Lock) bool { + return bytes.Compare(i.Key, j.Key) < 0 }) err = w.resolveLocksAcrossRegions(ctx, locks) @@ -1681,12 +1773,11 @@ func (w *GCWorker) doGC(ctx context.Context, safePoint uint64, concurrency int) return nil } -func (w *GCWorker) checkLeader() (bool, error) { +func (w *GCWorker) checkLeader(ctx context.Context) (bool, error) { metrics.GCWorkerCounter.WithLabelValues("check_leader").Inc() se := createSession(w.store) defer se.Close() - ctx := context.Background() _, err := se.ExecuteInternal(ctx, "BEGIN") if err != nil { return false, errors.Trace(err) @@ -1817,7 +1908,7 @@ func (w *GCWorker) loadDurationWithDefault(key string, def time.Duration) (*time } func (w *GCWorker) loadValueFromSysTable(key string) (string, error) { - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) se := createSession(w.store) defer se.Close() rs, err := se.ExecuteInternal(ctx, `SELECT HIGH_PRIORITY (variable_value) FROM mysql.tidb WHERE variable_name=%? FOR UPDATE`, key) @@ -1850,7 +1941,8 @@ func (w *GCWorker) saveValueToSysTable(key, value string) error { UPDATE variable_value = %?, comment = %?` se := createSession(w.store) defer se.Close() - _, err := se.ExecuteInternal(context.Background(), stmt, + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) + _, err := se.ExecuteInternal(ctx, stmt, key, value, gcVariableComments[key], value, gcVariableComments[key]) logutil.BgLogger().Debug("[gc worker] save kv", @@ -2122,6 +2214,7 @@ func NewMockGCWorker(store kv.Storage) (*MockGCWorker, error) { // DeleteRanges calls deleteRanges internally, just for test. 
func (w *MockGCWorker) DeleteRanges(ctx context.Context, safePoint uint64) error { logutil.Logger(ctx).Error("deleteRanges is called") + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnGC) return w.worker.deleteRanges(ctx, safePoint, 1) } diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go index 71c1f0ffea3e0..362c304efafe5 100644 --- a/store/gcworker/gc_worker_test.go +++ b/store/gcworker/gc_worker_test.go @@ -37,8 +37,8 @@ import ( "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/mockstore" - "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/oracle/oracles" @@ -60,6 +60,11 @@ type mockGCWorkerClient struct { type handler = func(addr string, req *tikvrpc.Request) (*tikvrpc.Response, error) +func gcContext() context.Context { + // internal statements must bind with resource type + return kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) +} + func (c *mockGCWorkerClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { if req.Type == tikvrpc.CmdUnsafeDestroyRange && c.unsafeDestroyRangeHandler != nil { return c.unsafeDestroyRangeHandler(addr, req) @@ -121,7 +126,13 @@ func createGCWorkerSuiteWithStoreType(t *testing.T, storeType mockstore.StoreTyp } s.oracle = &oracles.MockOracle{} - s.store, s.dom, clean = testkit.CreateMockStoreWithOracle(t, s.oracle, opts...) + store, err := mockstore.NewMockStore(opts...) + require.NoError(t, err) + store.GetOracle().Close() + store.(tikv.Storage).SetOracle(s.oracle) + dom, clean := bootstrap(t, store, 0) + s.store, s.dom = store, dom + s.tikvStore = s.store.(tikv.Storage) gcWorker, err := NewGCWorker(s.store, s.pdClient) @@ -258,6 +269,17 @@ func TestGetOracleTime(t *testing.T) { timeEqual(t, t2, t1.Add(time.Second*10), time.Millisecond*10) } +func TestGetLowResolveTS(t *testing.T) { + s, clean := createGCWorkerSuite(t) + defer clean() + + lowResolveTS, err := s.gcWorker.getTryResolveLocksTS() + require.NoError(t, err) + + lowResolveTime := oracle.GetTimeFromTS(lowResolveTS) + timeEqual(t, time.Now(), lowResolveTime.Add(gcTryResolveLocksIntervalFromNow), time.Millisecond*10) +} + func TestMinStartTS(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() @@ -296,7 +318,7 @@ func TestPrepareGC(t *testing.T) { now, err := s.gcWorker.getOracleTime() require.NoError(t, err) close(s.gcWorker.done) - ok, _, err := s.gcWorker.prepare() + ok, _, err := s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) lastRun, err := s.gcWorker.loadTime(gcLastRunTimeKey) @@ -310,11 +332,11 @@ func TestPrepareGC(t *testing.T) { err = s.gcWorker.saveDuration(gcRunIntervalKey, time.Minute*5) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 4) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) s.oracle.AddOffset(time.Minute * 2) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) @@ -322,13 +344,13 @@ func TestPrepareGC(t *testing.T) { err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute*30) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 5) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, 
err) require.False(t, ok) s.oracle.AddOffset(time.Minute * 40) now, err = s.gcWorker.getOracleTime() require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) safePoint, err = s.gcWorker.loadTime(gcSafePointKey) @@ -362,12 +384,12 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) @@ -375,7 +397,7 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) lifeTime, err := s.gcWorker.loadDuration(gcLifeTimeKey) @@ -385,7 +407,7 @@ func TestPrepareGC(t *testing.T) { s.oracle.AddOffset(time.Minute * 40) err = s.gcWorker.saveDuration(gcLifeTimeKey, time.Minute*30) require.NoError(t, err) - ok, _, err = s.gcWorker.prepare() + ok, _, err = s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.True(t, ok) lifeTime, err = s.gcWorker.loadDuration(gcLifeTimeKey) @@ -412,7 +434,7 @@ func TestPrepareGC(t *testing.T) { err = spkv.Put(fmt.Sprintf("%s/%s", infosync.ServerMinStartTSPath, "a"), strconv.FormatUint(minStartTS, 10)) require.NoError(t, err) s.oracle.AddOffset(time.Minute * 40) - ok, safepoint, err := s.gcWorker.prepare() + ok, safepoint, err := s.gcWorker.prepare(gcContext()) require.NoError(t, err) require.False(t, ok) require.Equal(t, uint64(0), safepoint) @@ -649,7 +671,7 @@ func TestDeleteRangesFailure(t *testing.T) { // Put some delete range tasks. se := createSession(s.gcWorker.store) defer se.Close() - _, err := se.Execute(context.Background(), `INSERT INTO mysql.gc_delete_range VALUES + _, err := se.Execute(gcContext(), `INSERT INTO mysql.gc_delete_range VALUES ("1", "2", "31", "32", "10"), ("3", "4", "33", "34", "10"), ("5", "6", "35", "36", "10")`) @@ -677,7 +699,7 @@ func TestDeleteRangesFailure(t *testing.T) { } // Check the DeleteRanges tasks. - preparedRanges, err := util.LoadDeleteRanges(se, 20) + preparedRanges, err := util.LoadDeleteRanges(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Equal(t, ranges, preparedRanges) @@ -729,14 +751,14 @@ func TestDeleteRangesFailure(t *testing.T) { failKey = ranges[0].StartKey failStore = stores[0] - err = deleteRangeFunc(context.Background(), 20, 1) + err = deleteRangeFunc(gcContext(), 20, 1) require.NoError(t, err) s.checkDestroyRangeReq(t, sendReqCh, ranges, stores) // The first delete range task should be still here since it didn't success. se = createSession(s.gcWorker.store) - remainingRanges, err := loadRangesFunc(se, 20) + remainingRanges, err := loadRangesFunc(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Equal(t, ranges[:1], remainingRanges) @@ -745,12 +767,12 @@ func TestDeleteRangesFailure(t *testing.T) { failStore = nil // Delete the remaining range again. 
- err = deleteRangeFunc(context.Background(), 20, 1) + err = deleteRangeFunc(gcContext(), 20, 1) require.NoError(t, err) s.checkDestroyRangeReq(t, sendReqCh, ranges[:1], stores) se = createSession(s.gcWorker.store) - remainingRanges, err = loadRangesFunc(se, 20) + remainingRanges, err = loadRangesFunc(gcContext(), se, 20) se.Close() require.NoError(t, err) require.Len(t, remainingRanges, 0) @@ -830,7 +852,7 @@ func TestLeaderTick(t *testing.T) { // Skip if prepare failed (disabling GC will make prepare returns ok = false). err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) s.checkNotCollected(t, p) err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) @@ -841,7 +863,7 @@ func TestLeaderTick(t *testing.T) { // Skip if gcWaitTime not exceeded. s.gcWorker.lastFinish = time.Now() - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) s.checkNotCollected(t, p) s.gcWorker.lastFinish = time.Now().Add(-veryLong) @@ -850,7 +872,7 @@ func TestLeaderTick(t *testing.T) { require.NoError(t, err) // Continue GC if all those checks passed. - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) // Wait for GC finish select { @@ -870,7 +892,7 @@ func TestLeaderTick(t *testing.T) { p = s.createGCProbe(t, "k1") s.oracle.AddOffset(gcDefaultLifeTime * 2) - err = s.gcWorker.leaderTick(context.Background()) + err = s.gcWorker.leaderTick(gcContext()) require.NoError(t, err) // Wait for GC finish select { @@ -904,7 +926,8 @@ func TestResolveLockRangeInfine(t *testing.T) { require.NoError(t, failpoint.Disable("tikvclient/invalidCacheAndRetry")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/gcworker/setGcResolveMaxBackoff")) }() - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte{0}, []byte{1}) + + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, 3, []byte{0}, []byte{1}) require.Error(t, err) } @@ -917,31 +940,92 @@ func TestResolveLockRangeMeetRegionCacheMiss(t *testing.T) { scanCntRef = &scanCnt resolveCnt int resolveCntRef = &resolveCnt + + scanLockCnt int + resolveBeforeSafepointLockCnt int + resolveAfterSafepointLockCnt int + safepointTS uint64 = 434245550444904450 + lowResolveTS uint64 = 434245550449098752 ) - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + + allLocks := []*txnlock.Lock{ + { + Key: []byte{1}, + // TxnID < safepointTS + TxnID: 434245550444904449, + TTL: 5, + }, + { + Key: []byte{2}, + // safepointTS < TxnID < lowResolveTS , TxnID + TTL < lowResolveTS + TxnID: 434245550445166592, + TTL: 10, + }, + { + Key: []byte{3}, + // safepointTS < TxnID < lowResolveTS , TxnID + TTL > lowResolveTS + TxnID: 434245550445166593, + TTL: 20, + }, + { + Key: []byte{4}, + // TxnID > lowResolveTS + TxnID: 434245550449099752, + TTL: 20, + }, + } + + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { *scanCntRef++ - return []*txnlock.Lock{ - { - Key: []byte{1}, - }, - { - Key: []byte{1}, - }, + + locks := make([]*txnlock.Lock, 0) + for _, l := range allLocks { + if l.TxnID <= maxVersion { + locks = append(locks, l) + scanLockCnt++ + } } + return locks } - s.gcWorker.testingKnobs.resolveLocks = func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) { + 
s.gcWorker.testingKnobs.batchResolveLocks = func( + locks []*txnlock.Lock, + regionID tikv.RegionVerID, + safepoint uint64, + ) (ok bool, err error) { *resolveCntRef++ if *resolveCntRef == 1 { s.gcWorker.tikvStore.GetRegionCache().InvalidateCachedRegion(regionID) // mock the region cache miss error return false, nil } + + resolveBeforeSafepointLockCnt = len(locks) + for _, l := range locks { + require.True(t, l.TxnID <= safepoint) + } return true, nil } - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte{0}, []byte{10}) + + s.gcWorker.testingKnobs.resolveLocks = func( + locks []*txnlock.Lock, + lowResolutionTS uint64, + ) (int64, error) { + for _, l := range locks { + expiredTS := oracle.ComposeTS(oracle.ExtractPhysical(l.TxnID)+int64(l.TTL), oracle.ExtractLogical(l.TxnID)) + if expiredTS <= lowResolutionTS { + resolveAfterSafepointLockCnt++ + } + } + return 0, nil + } + + _, err := s.gcWorker.resolveLocksForRange(gcContext(), safepointTS, lowResolveTS, []byte{0}, []byte{10}) require.NoError(t, err) require.Equal(t, 2, resolveCnt) require.Equal(t, 1, scanCnt) + require.Equal(t, 3, scanLockCnt) + require.Equal(t, 1, resolveBeforeSafepointLockCnt) + require.Equal(t, 1, resolveAfterSafepointLockCnt) } func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { @@ -967,7 +1051,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { s.cluster.Split(s.initRegion.regionID, region2, []byte("m"), newPeers, newPeers[0]) // init a, b lock in region1 and o, p locks in region2 - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { if regionID == s.initRegion.regionID { return []*txnlock.Lock{{Key: []byte("a")}, {Key: []byte("b")}} } @@ -977,7 +1061,11 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { return []*txnlock.Lock{} } - s.gcWorker.testingKnobs.resolveLocks = func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) { + s.gcWorker.testingKnobs.batchResolveLocks = func( + locks []*txnlock.Lock, + regionID tikv.RegionVerID, + safepoint uint64, + ) (ok bool, err error) { if regionID.GetID() == s.initRegion.regionID && *firstAccessRef { *firstAccessRef = false // merge region2 into region1 and return EpochNotMatch error. 
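The resolveLocks knob in TestResolveLockRangeMeetRegionCacheMiss above counts a lock as resolvable once its start TS plus TTL falls at or below the low-resolution TS. The same timestamp check in isolation; real resolution also consults the transaction status on TiKV:

```go
package main

import (
	"fmt"

	"github.com/tikv/client-go/v2/oracle"
)

// lockExpired mirrors the check the test's resolveLocks knob performs: the
// lock's TTL (milliseconds) is added to the physical part of its start TS and
// the result is compared against the low-resolution scan TS.
func lockExpired(txnStartTS, ttlMs, lowResolutionTS uint64) bool {
	expiredTS := oracle.ComposeTS(oracle.ExtractPhysical(txnStartTS)+int64(ttlMs), oracle.ExtractLogical(txnStartTS))
	return expiredTS <= lowResolutionTS
}

func main() {
	const lowResolveTS = 434245550449098752 // value used by the test above
	fmt.Println(lockExpired(434245550445166592, 10, lowResolveTS)) // short TTL: expired
	fmt.Println(lockExpired(434245550445166593, 20, lowResolveTS)) // longer TTL: still live
}
```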
@@ -990,7 +1078,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { []*metapb.Region{regionMeta}) require.NoError(t, err) // also let region1 contains all 4 locks - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { if regionID == s.initRegion.regionID { locks := []*txnlock.Lock{ {Key: []byte("a")}, @@ -1013,8 +1101,14 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { } return true, nil } + s.gcWorker.testingKnobs.resolveLocks = func( + locks []*txnlock.Lock, + lowResolutionTS uint64, + ) (int64, error) { + return 0, nil + } - _, err := s.gcWorker.resolveLocksForRange(context.Background(), 1, []byte(""), []byte("z")) + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, 3, []byte(""), []byte("z")) require.NoError(t, err) require.Len(t, resolvedLock, 4) expects := [][]byte{[]byte("a"), []byte("b"), []byte("o"), []byte("p")} @@ -1033,7 +1127,7 @@ func TestRunGCJob(t *testing.T) { useDistributedGC := s.gcWorker.checkUseDistributedGC() require.True(t, useDistributedGC) safePoint := s.mustAllocTs(t) - err := s.gcWorker.runGCJob(context.Background(), safePoint, 1) + err := s.gcWorker.runGCJob(gcContext(), safePoint, 1) require.NoError(t, err) pdSafePoint := s.mustGetSafePointFromPd(t) @@ -1043,7 +1137,7 @@ func TestRunGCJob(t *testing.T) { require.Equal(t, safePoint, etcdSafePoint) // Test distributed mode with safePoint regressing (although this is impossible) - err = s.gcWorker.runGCJob(context.Background(), safePoint-1, 1) + err = s.gcWorker.runGCJob(gcContext(), safePoint-1, 1) require.Error(t, err) // Central mode is deprecated in v5.0, fallback to distributed mode if it's set. @@ -1054,7 +1148,7 @@ func TestRunGCJob(t *testing.T) { p := s.createGCProbe(t, "k1") safePoint = s.mustAllocTs(t) - err = s.gcWorker.runGCJob(context.Background(), safePoint, 1) + err = s.gcWorker.runGCJob(gcContext(), safePoint, 1) require.NoError(t, err) s.checkCollected(t, p) @@ -1111,7 +1205,7 @@ func TestRunGCJobAPI(t *testing.T) { p := s.createGCProbe(t, "k1") safePoint := s.mustAllocTs(t) - err := RunGCJob(context.Background(), s.tikvStore, s.pdClient, safePoint, "mock", 1) + err := RunGCJob(gcContext(), s.tikvStore, s.pdClient, safePoint, "mock", 1) require.NoError(t, err) s.checkCollected(t, p) etcdSafePoint := s.loadEtcdSafePoint(t) @@ -1126,7 +1220,7 @@ func TestRunDistGCJobAPI(t *testing.T) { gcSafePointCacheInterval = 0 safePoint := s.mustAllocTs(t) - err := RunDistributedGCJob(context.Background(), s.tikvStore, s.pdClient, safePoint, "mock", 1) + err := RunDistributedGCJob(gcContext(), s.tikvStore, s.pdClient, safePoint, "mock", 1) require.NoError(t, err) pdSafePoint := s.mustGetSafePointFromPd(t) require.Equal(t, safePoint, pdSafePoint) @@ -1201,7 +1295,7 @@ func (s *mockGCWorkerSuite) makeMergedMockClient(t *testing.T, count int) (*merg const scanLockLimit = 3 - storesMap, err := s.gcWorker.getStoresMapForGC(context.Background()) + storesMap, err := s.gcWorker.getStoresMapForGC(gcContext()) require.NoError(t, err) scanner := newMergeLockScanner(100000, s.client, storesMap) scanner.scanLockLimit = scanLockLimit @@ -1246,7 +1340,7 @@ func (s *mockGCWorkerSuite) makeMergedMockClient(t *testing.T, count int) (*merg resultCh := make(chan []*txnlock.Lock) // Initializing and getting result from scanner is blocking operations. Collect the result in a separated thread. 
go func() { - err := scanner.Start(context.Background()) + err := scanner.Start(gcContext()) require.NoError(t, err) // Get a batch of a enough-large size to get all results. result := scanner.NextBatch(1000) @@ -1456,7 +1550,7 @@ func TestResolveLocksPhysical(t *testing.T) { s.client.removeLockObserverHandler = alwaysSucceedHandler } - ctx := context.Background() + ctx := gcContext() var safePoint uint64 = 10000 // No lock @@ -1612,7 +1706,7 @@ func TestPhysicalScanLockDeadlock(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() - ctx := context.Background() + ctx := gcContext() stores := s.cluster.GetAllStores() require.Greater(t, len(stores), 1) @@ -1727,7 +1821,7 @@ func TestGCWithPendingTxn(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() - ctx := context.Background() + ctx := gcContext() gcSafePointCacheInterval = 0 err := s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) require.NoError(t, err) @@ -1772,5 +1866,92 @@ func TestGCWithPendingTxn(t *testing.T) { require.NoError(t, err) err = txn.Commit(ctx) + require.Error(t, err) +} + +func TestGCWithPendingTxn2(t *testing.T) { + s, clean := createGCWorkerSuite(t) + defer clean() + + ctx := gcContext() + gcSafePointCacheInterval = 0 + err := s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) + require.NoError(t, err) + + now, err := s.oracle.GetTimestamp(ctx, &oracle.Option{}) + require.NoError(t, err) + + // Prepare to run gc with txn's startTS as the safepoint ts. + spkv := s.tikvStore.GetSafePointKV() + err = spkv.Put(fmt.Sprintf("%s/%s", infosync.ServerMinStartTSPath, "a"), strconv.FormatUint(now, 10)) + require.NoError(t, err) + s.mustSetTiDBServiceSafePoint(t, now, now) + veryLong := gcDefaultLifeTime * 100 + err = s.gcWorker.saveTime(gcLastRunTimeKey, oracle.GetTimeFromTS(s.mustAllocTs(t)).Add(-veryLong)) + require.NoError(t, err) + s.gcWorker.lastFinish = time.Now().Add(-veryLong) + err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) + require.NoError(t, err) + + // lock the key1 + k1 := []byte("tk1") + v1 := []byte("v1") + txn, err := s.store.Begin(tikv.WithStartTS(now)) + require.NoError(t, err) + txn.SetOption(kv.Pessimistic, true) + lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()} + + err = txn.Set(k1, v1) + require.NoError(t, err) + err = txn.LockKeys(ctx, lockCtx, k1) + require.NoError(t, err) + + // lock the key2 + k2 := []byte("tk2") + v2 := []byte("v2") + startTS := oracle.ComposeTS(oracle.ExtractPhysical(now)+10000, oracle.ExtractLogical(now)) + txn2, err := s.store.Begin(tikv.WithStartTS(startTS)) + require.NoError(t, err) + txn2.SetOption(kv.Pessimistic, true) + lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()} + + err = txn2.Set(k2, v2) + require.NoError(t, err) + err = txn2.LockKeys(ctx, lockCtx, k2) + require.NoError(t, err) + + // Trigger the tick let the gc job start. 
+ s.oracle.AddOffset(time.Minute * 5) + err = s.gcWorker.leaderTick(ctx) + require.NoError(t, err) + // Wait for GC finish + select { + case err = <-s.gcWorker.done: + s.gcWorker.gcIsRunning = false + break + case <-time.After(time.Second * 10): + err = errors.New("receive from s.gcWorker.done timeout") + } + require.NoError(t, err) + + err = txn.Commit(ctx) + require.Error(t, err) + err = txn2.Commit(ctx) + require.NoError(t, err) +} + +func bootstrap(t testing.TB, store kv.Storage, lease time.Duration) (*domain.Domain, func()) { + session.SetSchemaLease(lease) + session.DisableStats4Test() + dom, err := session.BootstrapSession(store) require.NoError(t, err) + + dom.SetStatsUpdating(true) + + clean := func() { + dom.Close() + err := store.Close() + require.NoError(t, err) + } + return dom, clean } diff --git a/store/helper/BUILD.bazel b/store/helper/BUILD.bazel index efe0ffe8c5a4a..ebd81b67a80a0 100644 --- a/store/helper/BUILD.bazel +++ b/store/helper/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", "@com_github_tikv_client_go_v2//txnkv/txnlock", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/store/helper/helper.go b/store/helper/helper.go index 948a84c84635d..760cfb11f6a3a 100644 --- a/store/helper/helper.go +++ b/store/helper/helper.go @@ -25,7 +25,6 @@ import ( "math" "net/http" "net/url" - "sort" "strconv" "strings" "time" @@ -48,6 +47,7 @@ import ( "github.com/tikv/client-go/v2/tikvrpc" "github.com/tikv/client-go/v2/txnkv/txnlock" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // Storage represents a storage that connects TiKV. @@ -620,15 +620,6 @@ func isBehindKeyRange(x withKeyRange, startKey, endKey string) bool { func (r *RegionInfo) getStartKey() string { return r.StartKey } func (r *RegionInfo) getEndKey() string { return r.EndKey } -// for sorting -type byRegionStartKey []*RegionInfo - -func (xs byRegionStartKey) Len() int { return len(xs) } -func (xs byRegionStartKey) Swap(i, j int) { xs[i], xs[j] = xs[j], xs[i] } -func (xs byRegionStartKey) Less(i, j int) bool { - return xs[i].getStartKey() < xs[j].getStartKey() -} - // TableInfoWithKeyRange stores table or index informations with its key range. type TableInfoWithKeyRange struct { *TableInfo @@ -639,15 +630,6 @@ type TableInfoWithKeyRange struct { func (t TableInfoWithKeyRange) getStartKey() string { return t.StartKey } func (t TableInfoWithKeyRange) getEndKey() string { return t.EndKey } -// for sorting -type byTableStartKey []TableInfoWithKeyRange - -func (xs byTableStartKey) Len() int { return len(xs) } -func (xs byTableStartKey) Swap(i, j int) { xs[i], xs[j] = xs[j], xs[i] } -func (xs byTableStartKey) Less(i, j int) bool { - return xs[i].getStartKey() < xs[j].getStartKey() -} - // NewTableWithKeyRange constructs TableInfoWithKeyRange for given table, it is exported only for test. 
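store/helper drops its byRegionStartKey and byTableStartKey sort.Interface boilerplate here; the replacement inline slices.SortFunc calls appear in the hunks that follow. A stand-alone before/after sketch of that shape of refactor, with a stand-in regionInfo type:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type regionInfo struct{ StartKey string }

// Before: a dedicated sort.Interface implementation.
//
//	type byRegionStartKey []*regionInfo
//	func (xs byRegionStartKey) Len() int           { return len(xs) }
//	func (xs byRegionStartKey) Swap(i, j int)      { xs[i], xs[j] = xs[j], xs[i] }
//	func (xs byRegionStartKey) Less(i, j int) bool { return xs[i].StartKey < xs[j].StartKey }
//	sort.Sort(byRegionStartKey(regions))
//
// After: the comparison lives at the call site and the helper type disappears.
func sortRegions(regions []*regionInfo) {
	slices.SortFunc(regions, func(i, j *regionInfo) bool {
		return i.StartKey < j.StartKey
	})
}

func main() {
	regions := []*regionInfo{{StartKey: "t2"}, {StartKey: "t1"}}
	sortRegions(regions)
	fmt.Println(regions[0].StartKey, regions[1].StartKey)
}
```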
func NewTableWithKeyRange(db *model.DBInfo, table *model.TableInfo) TableInfoWithKeyRange { return newTableWithKeyRange(db, table) @@ -749,7 +731,9 @@ func (h *Helper) GetTablesInfoWithKeyRange(schemas []*model.DBInfo) []TableInfoW } } } - sort.Sort(byTableStartKey(tables)) + slices.SortFunc(tables, func(i, j TableInfoWithKeyRange) bool { + return i.getStartKey() < j.getStartKey() + }) return tables } @@ -761,7 +745,9 @@ func (h *Helper) ParseRegionsTableInfos(regionsInfo []*RegionInfo, tables []Tabl return tableInfos } // tables is sorted in GetTablesInfoWithKeyRange func - sort.Sort(byRegionStartKey(regionsInfo)) + slices.SortFunc(regionsInfo, func(i, j *RegionInfo) bool { + return i.getStartKey() < j.getStartKey() + }) idx := 0 OutLoop: diff --git a/store/mockstore/unistore/cophandler/BUILD.bazel b/store/mockstore/unistore/cophandler/BUILD.bazel index 3dc116dad55af..c28fd9d865518 100644 --- a/store/mockstore/unistore/cophandler/BUILD.bazel +++ b/store/mockstore/unistore/cophandler/BUILD.bazel @@ -48,6 +48,7 @@ go_library( "@com_github_pingcap_tipb//go-tipb", "@com_github_tikv_client_go_v2//tikvrpc", "@com_github_twmb_murmur3//:murmur3", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", ], ) diff --git a/store/mockstore/unistore/cophandler/analyze.go b/store/mockstore/unistore/cophandler/analyze.go index 3aa8392a52da6..a04b158e4b4a6 100644 --- a/store/mockstore/unistore/cophandler/analyze.go +++ b/store/mockstore/unistore/cophandler/analyze.go @@ -19,7 +19,6 @@ import ( "context" "math" "math/rand" - "sort" "time" "github.com/golang/protobuf/proto" @@ -41,6 +40,7 @@ import ( "github.com/pingcap/tidb/util/rowcodec" "github.com/pingcap/tipb/go-tipb" "github.com/twmb/murmur3" + "golang.org/x/exp/slices" ) // handleCopAnalyzeRequest handles coprocessor analyze request. 
@@ -112,13 +112,13 @@ func handleAnalyzeIndexReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, anal if processor.topNCurValuePair.Count != 0 { processor.topNValuePairs = append(processor.topNValuePairs, processor.topNCurValuePair) } - sort.Slice(processor.topNValuePairs, func(i, j int) bool { - if processor.topNValuePairs[i].Count > processor.topNValuePairs[j].Count { + slices.SortFunc(processor.topNValuePairs, func(i, j statistics.TopNMeta) bool { + if i.Count > j.Count { return true - } else if processor.topNValuePairs[i].Count < processor.topNValuePairs[j].Count { + } else if i.Count < j.Count { return false } - return bytes.Compare(processor.topNValuePairs[i].Encoded, processor.topNValuePairs[j].Encoded) < 0 + return bytes.Compare(i.Encoded, j.Encoded) < 0 }) if len(processor.topNValuePairs) > int(processor.topNCount) { processor.topNValuePairs = processor.topNValuePairs[:processor.topNCount] @@ -564,13 +564,13 @@ func handleAnalyzeMixedReq(dbReader *dbreader.DBReader, rans []kv.KeyRange, anal if e.topNCurValuePair.Count != 0 { e.topNValuePairs = append(e.topNValuePairs, e.topNCurValuePair) } - sort.Slice(e.topNValuePairs, func(i, j int) bool { - if e.topNValuePairs[i].Count > e.topNValuePairs[j].Count { + slices.SortFunc(e.topNValuePairs, func(i, j statistics.TopNMeta) bool { + if i.Count > j.Count { return true - } else if e.topNValuePairs[i].Count < e.topNValuePairs[j].Count { + } else if i.Count < j.Count { return false } - return bytes.Compare(e.topNValuePairs[i].Encoded, e.topNValuePairs[j].Encoded) < 0 + return bytes.Compare(i.Encoded, j.Encoded) < 0 }) if len(e.topNValuePairs) > int(e.topNCount) { e.topNValuePairs = e.topNValuePairs[:e.topNCount] diff --git a/store/mockstore/unistore/cophandler/cop_handler.go b/store/mockstore/unistore/cophandler/cop_handler.go index 3351f01f71888..75fa686ff8fca 100644 --- a/store/mockstore/unistore/cophandler/cop_handler.go +++ b/store/mockstore/unistore/cophandler/cop_handler.go @@ -501,7 +501,7 @@ func genRespWithMPPExec(chunks []tipb.Chunk, lastRange *coprocessor.KeyRange, co } } resp.ExecDetails = &kvrpcpb.ExecDetails{ - TimeDetail: &kvrpcpb.TimeDetail{ProcessWallTimeMs: int64(dur / time.Millisecond)}, + TimeDetail: &kvrpcpb.TimeDetail{ProcessWallTimeMs: uint64(dur / time.Millisecond)}, } resp.ExecDetailsV2 = &kvrpcpb.ExecDetailsV2{ TimeDetail: resp.ExecDetails.TimeDetail, diff --git a/store/mockstore/unistore/lockstore/load_dump.go b/store/mockstore/unistore/lockstore/load_dump.go index f0192331ecd48..4f8e9acf46dff 100644 --- a/store/mockstore/unistore/lockstore/load_dump.go +++ b/store/mockstore/unistore/lockstore/load_dump.go @@ -27,6 +27,7 @@ import ( // LoadFromFile load a meta from a file. 
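The analyze topN comparators rewritten above order pairs by descending count and break ties by ascending encoded bytes, so the later truncation to topNCount is deterministic. The same comparator as a small stand-alone example, with topNMeta standing in for statistics.TopNMeta:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/exp/slices"
)

// topNMeta is a stand-in for statistics.TopNMeta.
type topNMeta struct {
	Encoded []byte
	Count   uint64
}

// sortTopN orders pairs the way the rewritten analyze comparator does:
// higher counts first, equal counts broken by ascending encoded value.
func sortTopN(pairs []topNMeta) {
	slices.SortFunc(pairs, func(i, j topNMeta) bool {
		if i.Count != j.Count {
			return i.Count > j.Count
		}
		return bytes.Compare(i.Encoded, j.Encoded) < 0
	})
}

func main() {
	pairs := []topNMeta{
		{Encoded: []byte("b"), Count: 3},
		{Encoded: []byte("a"), Count: 3},
		{Encoded: []byte("c"), Count: 9},
	}
	sortTopN(pairs)
	for _, p := range pairs {
		fmt.Printf("%s:%d ", p.Encoded, p.Count)
	}
	fmt.Println() // c:9 a:3 b:3
}
```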
func (ls *MemStore) LoadFromFile(fileName string) (meta []byte, err error) { + //nolint: gosec f, err := os.Open(fileName) if err != nil { if os.IsNotExist(err) { diff --git a/store/mockstore/unistore/tikv/deadlock.go b/store/mockstore/unistore/tikv/deadlock.go index 90f0c5b226a56..7eeb1fb2c5b64 100644 --- a/store/mockstore/unistore/tikv/deadlock.go +++ b/store/mockstore/unistore/tikv/deadlock.go @@ -20,14 +20,13 @@ import ( "sync/atomic" "time" - "go.uber.org/zap" - "google.golang.org/grpc" - deadlockPb "github.com/pingcap/kvproto/pkg/deadlock" "github.com/pingcap/log" "github.com/pingcap/tidb/store/mockstore/unistore/pd" "github.com/pingcap/tidb/store/mockstore/unistore/tikv/kverrors" "github.com/pingcap/tidb/store/mockstore/unistore/util/lockwaiter" + "go.uber.org/zap" + "google.golang.org/grpc" ) // Follower will send detection rpc to Leader diff --git a/store/mockstore/unistore/tikv/mock_region.go b/store/mockstore/unistore/tikv/mock_region.go index 1db8df4506691..e5609f5deabd3 100644 --- a/store/mockstore/unistore/tikv/mock_region.go +++ b/store/mockstore/unistore/tikv/mock_region.go @@ -17,7 +17,6 @@ package tikv import ( "bytes" "context" - "sort" "sync" "sync/atomic" "time" @@ -38,6 +37,7 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" pdclient "github.com/tikv/pd/client" + "golang.org/x/exp/slices" ) // MPPTaskHandlerMap is a map of *cophandler.MPPTaskHandler. @@ -400,8 +400,8 @@ func (rm *MockRegionManager) SplitRegion(req *kvrpcpb.SplitRegionRequest) *kvrpc for _, rawKey := range req.SplitKeys { splitKeys = append(splitKeys, codec.EncodeBytes(nil, rawKey)) } - sort.Slice(splitKeys, func(i, j int) bool { - return bytes.Compare(splitKeys[i], splitKeys[j]) < 0 + slices.SortFunc(splitKeys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 }) newRegions, err := rm.splitKeys(splitKeys) diff --git a/store/mockstore/unistore/tikv/mvcc.go b/store/mockstore/unistore/tikv/mvcc.go index 8d8c7ed3363f7..927d81e379f51 100644 --- a/store/mockstore/unistore/tikv/mvcc.go +++ b/store/mockstore/unistore/tikv/mvcc.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/util/rowcodec" "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // MVCCStore is a wrapper of badger.DB to provide MVCC functions. 
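The mvcc.go hunks that follow keep the existing fast path while switching to x/exp/slices: check IsSortedFunc first and only sort when needed, sharing one less function between the check and the sort. A self-contained version of that idiom:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/exp/slices"
)

// sortKeys keeps the fast path from mvcc.go: if the keys already arrive in
// byte order the sort is skipped entirely, otherwise they are sorted in place.
func sortKeys(keys [][]byte) [][]byte {
	less := func(i, j []byte) bool { return bytes.Compare(i, j) < 0 }
	if slices.IsSortedFunc(keys, less) {
		return keys
	}
	slices.SortFunc(keys, less)
	return keys
}

func main() {
	keys := [][]byte{[]byte("b"), []byte("a")}
	for _, k := range sortKeys(keys) {
		fmt.Println(string(k))
	}
}
```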
@@ -174,13 +175,13 @@ func (store *MVCCStore) getDBItems(reqCtx *requestCtx, mutations []*kvrpcpb.Muta } func sortMutations(mutations []*kvrpcpb.Mutation) []*kvrpcpb.Mutation { - fn := func(i, j int) bool { - return bytes.Compare(mutations[i].Key, mutations[j].Key) < 0 + fn := func(i, j *kvrpcpb.Mutation) bool { + return bytes.Compare(i.Key, j.Key) < 0 } - if sort.SliceIsSorted(mutations, fn) { + if slices.IsSortedFunc(mutations, fn) { return mutations } - sort.Slice(mutations, fn) + slices.SortFunc(mutations, fn) return mutations } @@ -214,13 +215,13 @@ func (sorter pessimisticPrewriteSorter) Swap(i, j int) { } func sortKeys(keys [][]byte) [][]byte { - less := func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 + less := func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 } - if sort.SliceIsSorted(keys, less) { + if slices.IsSortedFunc(keys, less) { return keys } - sort.Slice(keys, less) + slices.SortFunc(keys, less) return keys } @@ -1412,8 +1413,8 @@ func (store *MVCCStore) MvccGetByKey(reqCtx *requestCtx, key []byte) (*kvrpcpb.M if err != nil { return nil, err } - sort.Slice(mvccInfo.Writes, func(i, j int) bool { - return mvccInfo.Writes[i].CommitTs > mvccInfo.Writes[j].CommitTs + slices.SortFunc(mvccInfo.Writes, func(i, j *kvrpcpb.MvccWrite) bool { + return i.CommitTs > j.CommitTs }) mvccInfo.Values = make([]*kvrpcpb.MvccValue, len(mvccInfo.Writes)) for i := 0; i < len(mvccInfo.Writes); i++ { diff --git a/store/mockstore/unistore/tikv/mvcc/mvcc.go b/store/mockstore/unistore/tikv/mvcc/mvcc.go index a8ae16a9321d6..b7185766bfeeb 100644 --- a/store/mockstore/unistore/tikv/mvcc/mvcc.go +++ b/store/mockstore/unistore/tikv/mvcc/mvcc.go @@ -19,7 +19,6 @@ import ( "unsafe" "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/util/codec" ) diff --git a/store/mockstore/unistore/util/lockwaiter/BUILD.bazel b/store/mockstore/unistore/util/lockwaiter/BUILD.bazel index dd2dbcfbb8038..effbb268c1fc3 100644 --- a/store/mockstore/unistore/util/lockwaiter/BUILD.bazel +++ b/store/mockstore/unistore/util/lockwaiter/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//store/mockstore/unistore/config", "@com_github_pingcap_kvproto//pkg/deadlock", "@com_github_pingcap_log//:log", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", ], ) diff --git a/store/mockstore/unistore/util/lockwaiter/lockwaiter.go b/store/mockstore/unistore/util/lockwaiter/lockwaiter.go index de8dda34e4068..b38f0335ed3da 100644 --- a/store/mockstore/unistore/util/lockwaiter/lockwaiter.go +++ b/store/mockstore/unistore/util/lockwaiter/lockwaiter.go @@ -15,7 +15,6 @@ package lockwaiter import ( - "sort" "sync" "time" @@ -23,6 +22,7 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/store/mockstore/unistore/config" "go.uber.org/zap" + "golang.org/x/exp/slices" ) // LockNoWait is used for pessimistic lock wait time @@ -51,8 +51,8 @@ type queue struct { func (q *queue) getOldestWaiter() (*Waiter, []*Waiter) { // make the waiters in start ts order - sort.Slice(q.waiters, func(i, j int) bool { - return q.waiters[i].startTS < q.waiters[j].startTS + slices.SortFunc(q.waiters, func(i, j *Waiter) bool { + return i.startTS < j.startTS }) oldestWaiter := q.waiters[0] remainWaiter := q.waiters[1:] diff --git a/store/pdtypes/BUILD.bazel b/store/pdtypes/BUILD.bazel index fe59445be494a..dd35cae23c4d9 100644 --- a/store/pdtypes/BUILD.bazel +++ b/store/pdtypes/BUILD.bazel @@ -17,5 +17,6 @@ go_library( "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/metapb", 
"@com_github_pingcap_kvproto//pkg/pdpb", + "@org_golang_x_exp//slices", ], ) diff --git a/store/pdtypes/region_tree.go b/store/pdtypes/region_tree.go index efff8599030b1..cc99e981e0097 100644 --- a/store/pdtypes/region_tree.go +++ b/store/pdtypes/region_tree.go @@ -16,9 +16,9 @@ package pdtypes import ( "bytes" - "sort" "github.com/pingcap/kvproto/pkg/metapb" + "golang.org/x/exp/slices" ) // Region is a mock of PD's core.RegionInfo. For testing purpose. @@ -52,8 +52,8 @@ func (t *RegionTree) SetRegion(region *Region) { // ScanRange scans regions intersecting [start key, end key), returns at most // `limit` regions. limit <= 0 means no limit. func (t *RegionTree) ScanRange(startKey, endKey []byte, limit int) []*Region { - sort.Slice(t.Regions, func(i, j int) bool { - return bytes.Compare(t.Regions[i].Meta.StartKey, t.Regions[j].Meta.StartKey) < 0 + slices.SortFunc(t.Regions, func(i, j *Region) bool { + return bytes.Compare(i.Meta.StartKey, j.Meta.StartKey) < 0 }) pivot := NewRegionInfo(&metapb.Region{StartKey: startKey, EndKey: endKey}, nil) var res []*Region diff --git a/store/store_test.go b/store/store_test.go index 698f3d4714ce3..081462bb9bc85 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -653,7 +653,8 @@ func TestIsolationInc(t *testing.T) { defer wg.Done() for j := 0; j < 100; j++ { var id int64 - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { var err1 error id, err1 = kv.IncInt64(txn, []byte("key"), 1) return err1 @@ -698,12 +699,13 @@ func TestIsolationMultiInc(t *testing.T) { var wg sync.WaitGroup + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) wg.Add(threadCnt) for i := 0; i < threadCnt; i++ { go func() { defer wg.Done() for j := 0; j < incCnt; j++ { - err := kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + err := kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { for _, key := range keys { _, err1 := kv.IncInt64(txn, key, 1) if err1 != nil { @@ -720,7 +722,7 @@ func TestIsolationMultiInc(t *testing.T) { wg.Wait() - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { for _, key := range keys { id, err1 := kv.GetInt64(context.TODO(), txn, key) if err1 != nil { diff --git a/structure/structure_test.go b/structure/structure_test.go index 9da07b2dba604..63300f004c533 100644 --- a/structure/structure_test.go +++ b/structure/structure_test.go @@ -316,7 +316,8 @@ func TestHash(t *testing.T) { err = txn.Commit(context.Background()) require.NoError(t, err) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err = kv.RunInNewTxn(ctx, store, false, func(ctx context.Context, txn kv.Transaction) error { newTxn := structure.NewStructure(txn, txn, []byte{0x00}) err = newTxn.Set(key, []byte("abc")) require.NoError(t, err) diff --git a/table/BUILD.bazel b/table/BUILD.bazel index e5ad2bb8c1b12..de9150fad4fdf 100644 --- a/table/BUILD.bazel +++ b/table/BUILD.bazel @@ -37,6 +37,7 @@ go_library( go_test( name = "table_test", + timeout = "short", 
srcs = [ "column_test.go", "main_test.go", diff --git a/table/tables/BUILD.bazel b/table/tables/BUILD.bazel index 87a7fe63ad1b2..a6e9bec521355 100644 --- a/table/tables/BUILD.bazel +++ b/table/tables/BUILD.bazel @@ -61,6 +61,7 @@ go_library( go_test( name = "tables_test", + timeout = "short", srcs = [ "cache_test.go", "index_test.go", diff --git a/table/tables/cache.go b/table/tables/cache.go index fc9f3f52ce16c..4dc2bfc5c6209 100644 --- a/table/tables/cache.go +++ b/table/tables/cache.go @@ -146,7 +146,8 @@ func (c *cachedTable) loadDataFromOriginalTable(store kv.Storage) (kv.MemBuffer, } var startTS uint64 totalSize := int64(0) - err = kv.RunInNewTxn(context.Background(), store, true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err = kv.RunInNewTxn(ctx, store, true, func(ctx context.Context, txn kv.Transaction) error { prefix := tablecodec.GenTablePrefix(c.tableID) if err != nil { return errors.Trace(err) diff --git a/table/tables/state_remote.go b/table/tables/state_remote.go index 4a8d0b39b632c..c5724a2e4e732 100644 --- a/table/tables/state_remote.go +++ b/table/tables/state_remote.go @@ -20,6 +20,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/sqlexec" @@ -385,6 +386,7 @@ func (h *stateRemoteHandle) rollbackTxn(ctx context.Context) error { } func (h *stateRemoteHandle) runInTxn(ctx context.Context, pessimistic bool, fn func(ctx context.Context, txnTS uint64) error) error { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) err := h.beginTxn(ctx, pessimistic) if err != nil { return errors.Trace(err) @@ -415,6 +417,7 @@ func (h *stateRemoteHandle) runInTxn(ctx context.Context, pessimistic bool, fn f } func (h *stateRemoteHandle) loadRow(ctx context.Context, tid int64, forUpdate bool) (CachedTableLockType, uint64, uint64, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) var chunkRows []chunk.Row var err error if forUpdate { @@ -450,6 +453,7 @@ func (h *stateRemoteHandle) updateRow(ctx context.Context, tid int64, lockType s func (h *stateRemoteHandle) execSQL(ctx context.Context, sql string, args ...interface{}) ([]chunk.Row, error) { rs, err := h.exec.ExecuteInternal(ctx, sql, args...) if rs != nil { + //nolint: errcheck defer rs.Close() } if err != nil { diff --git a/table/tables/state_remote_test.go b/table/tables/state_remote_test.go index 5d75c5e47d129..140c2fc1e3da9 100644 --- a/table/tables/state_remote_test.go +++ b/table/tables/state_remote_test.go @@ -40,7 +40,7 @@ func TestStateRemote(t *testing.T) { tk.MustExec("use test") se := tk.Session() - ctx := context.Background() + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) h := tables.NewStateRemote(se) // Check the initial value. diff --git a/table/tables/tables.go b/table/tables/tables.go index aab32e1f18d8f..9b35a1af8cc3b 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -1246,18 +1246,19 @@ func (t *TableCommon) addDeleteBinlog(ctx sessionctx.Context, r []types.Datum, c return nil } -func writeSequenceUpdateValueBinlog(ctx sessionctx.Context, db, sequence string, end int64) error { +func writeSequenceUpdateValueBinlog(sctx sessionctx.Context, db, sequence string, end int64) error { // 1: when sequenceCommon update the local cache passively. // 2: When sequenceCommon setval to the allocator actively. 
// Both of this two case means the upper bound the sequence has changed in meta, which need to write the binlog // to the downstream. // Sequence sends `select setval(seq, num)` sql string to downstream via `setDDLBinlog`, which is mocked as a DDL binlog. - binlogCli := ctx.GetSessionVars().BinlogClient - sqlMode := ctx.GetSessionVars().SQLMode + binlogCli := sctx.GetSessionVars().BinlogClient + sqlMode := sctx.GetSessionVars().SQLMode sequenceFullName := stringutil.Escape(db, sqlMode) + "." + stringutil.Escape(sequence, sqlMode) sql := "select setval(" + sequenceFullName + ", " + strconv.FormatInt(end, 10) + ")" - err := kv.RunInNewTxn(context.Background(), ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta) + err := kv.RunInNewTxn(ctx, sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) mockJobID, err := m.GenGlobalID() if err != nil { diff --git a/table/temptable/BUILD.bazel b/table/temptable/BUILD.bazel index 262c3d05977e0..efeab02191b93 100644 --- a/table/temptable/BUILD.bazel +++ b/table/temptable/BUILD.bazel @@ -52,6 +52,7 @@ go_test( "//util/mock", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", + "@org_golang_x_exp//slices", "@org_uber_go_goleak//:goleak", ], ) diff --git a/table/temptable/ddl.go b/table/temptable/ddl.go index fddd0bbbeabf5..ccad2b7b0214c 100644 --- a/table/temptable/ddl.go +++ b/table/temptable/ddl.go @@ -161,7 +161,8 @@ func newTemporaryTableFromTableInfo(sctx sessionctx.Context, tbInfo *model.Table // Local temporary table uses a real table ID. // We could mock a table ID, but the mocked ID might be identical to an existing // real table, and then we'll get into trouble. - err := kv.RunInNewTxn(context.Background(), sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnCacheTable) + err := kv.RunInNewTxn(ctx, sctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) tblID, err := m.GenGlobalID() if err != nil { diff --git a/table/temptable/main_test.go b/table/temptable/main_test.go index 96c09feaeaec9..7ee3919b08f60 100644 --- a/table/temptable/main_test.go +++ b/table/temptable/main_test.go @@ -18,7 +18,6 @@ import ( "bytes" "context" "fmt" - "sort" "testing" "github.com/pingcap/tidb/infoschema" @@ -31,6 +30,7 @@ import ( "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "golang.org/x/exp/slices" ) func TestMain(m *testing.M) { @@ -124,10 +124,10 @@ func newMockedRetriever(t *testing.T) *mockedRetriever { } func (r *mockedRetriever) SetData(data []*kv.Entry) *mockedRetriever { - lessFunc := func(i, j int) bool { return bytes.Compare(data[i].Key, data[j].Key) < 0 } - if !sort.SliceIsSorted(data, lessFunc) { + lessFunc := func(i, j *kv.Entry) bool { return bytes.Compare(i.Key, j.Key) < 0 } + if !slices.IsSortedFunc(data, lessFunc) { data = append([]*kv.Entry{}, data...) 
- sort.Slice(data, lessFunc) + slices.SortFunc(data, lessFunc) } r.data = data diff --git a/telemetry/BUILD.bazel b/telemetry/BUILD.bazel index 73ae1814d3308..66fc1c7a3631b 100644 --- a/telemetry/BUILD.bazel +++ b/telemetry/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "//config", "//domain/infosync", "//infoschema", + "//kv", "//metrics", "//parser/model", "//parser/mysql", @@ -38,6 +39,7 @@ go_library( "@com_github_shirou_gopsutil_v3//host", "@com_github_tikv_client_go_v2//metrics", "@io_etcd_go_etcd_client_v3//:client", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], @@ -56,7 +58,9 @@ go_test( embed = [":telemetry"], deps = [ "//config", + "//kv", "//session", + "//sessionctx", "//sessionctx/variable", "//testkit", "//testkit/testsetup", diff --git a/telemetry/data.go b/telemetry/data.go index ddf574e3bf1ed..49bac14ce69f8 100644 --- a/telemetry/data.go +++ b/telemetry/data.go @@ -15,8 +15,10 @@ package telemetry import ( + "context" "time" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" ) @@ -31,21 +33,22 @@ type telemetryData struct { SlowQueryStats *slowQueryStats `json:"slowQueryStats"` } -func generateTelemetryData(ctx sessionctx.Context, trackingID string) telemetryData { +func generateTelemetryData(sctx sessionctx.Context, trackingID string) telemetryData { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) r := telemetryData{ ReportTimestamp: time.Now().Unix(), TrackingID: trackingID, } - if h, err := getClusterHardware(ctx); err == nil { + if h, err := getClusterHardware(ctx, sctx); err == nil { r.Hardware = h } - if i, err := getClusterInfo(ctx); err == nil { + if i, err := getClusterInfo(ctx, sctx); err == nil { r.Instances = i } - if f, err := getFeatureUsage(ctx); err == nil { + if f, err := getFeatureUsage(ctx, sctx); err == nil { r.FeatureUsage = f } - if s, err := getSlowQueryStats(ctx); err == nil { + if s, err := getSlowQueryStats(ctx, sctx); err == nil { r.SlowQueryStats = s } @@ -57,6 +60,7 @@ func generateTelemetryData(ctx sessionctx.Context, trackingID string) telemetryD func postReportTelemetryData() { postReportTxnUsage() postReportCTEUsage() + postReportMultiSchemaChangeUsage() postReportSlowQueryStats() postReportNonTransactionalCounter() } diff --git a/telemetry/data_cluster_hardware.go b/telemetry/data_cluster_hardware.go index d357e9243fd0b..659fceb00eac3 100644 --- a/telemetry/data_cluster_hardware.go +++ b/telemetry/data_cluster_hardware.go @@ -17,13 +17,13 @@ package telemetry import ( "context" "regexp" - "sort" "strings" "github.com/iancoleman/strcase" "github.com/pingcap/errors" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/sqlexec" + "golang.org/x/exp/slices" ) var ( @@ -36,10 +36,10 @@ var ( ) func init() { - sort.Strings(sortedCPUAllowedFieldNames) - sort.Strings(sortedDiskAllowedFieldNames) - sort.Strings(sortedDiskAllowedPaths) - sort.Strings(sortedMemoryAllowedFieldNames) + slices.Sort(sortedCPUAllowedFieldNames) + slices.Sort(sortedDiskAllowedFieldNames) + slices.Sort(sortedDiskAllowedPaths) + slices.Sort(sortedMemoryAllowedFieldNames) } type clusterHardwareItem struct { @@ -67,9 +67,9 @@ func normalizeFieldName(name string) string { return strcase.ToLowerCamel(name) } -func getClusterHardware(ctx sessionctx.Context) ([]*clusterHardwareItem, error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT TYPE, INSTANCE, DEVICE_TYPE, DEVICE_NAME, NAME, VALUE FROM 
information_schema.cluster_hardware`) +func getClusterHardware(ctx context.Context, sctx sessionctx.Context) ([]*clusterHardwareItem, error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT TYPE, INSTANCE, DEVICE_TYPE, DEVICE_NAME, NAME, VALUE FROM information_schema.cluster_hardware`) if err != nil { return nil, errors.Trace(err) } diff --git a/telemetry/data_cluster_info.go b/telemetry/data_cluster_info.go index 40f87bccdfd3d..7cda111e785ca 100644 --- a/telemetry/data_cluster_info.go +++ b/telemetry/data_cluster_info.go @@ -34,10 +34,10 @@ type clusterInfoItem struct { UpTime string `json:"upTime,omitempty"` } -func getClusterInfo(ctx sessionctx.Context) ([]*clusterInfoItem, error) { +func getClusterInfo(ctx context.Context, sctx sessionctx.Context) ([]*clusterInfoItem, error) { // Explicitly list all field names instead of using `*` to avoid potential leaking sensitive info when adding new fields in future. - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, `SELECT TYPE, INSTANCE, STATUS_ADDRESS, VERSION, GIT_HASH, START_TIME, UPTIME FROM information_schema.cluster_info`) + exec := sctx.(sqlexec.RestrictedSQLExecutor) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT TYPE, INSTANCE, STATUS_ADDRESS, VERSION, GIT_HASH, START_TIME, UPTIME FROM information_schema.cluster_info`) if err != nil { return nil, errors.Trace(err) } diff --git a/telemetry/data_feature_usage.go b/telemetry/data_feature_usage.go index 622f25c4b2430..bc73153a84e15 100644 --- a/telemetry/data_feature_usage.go +++ b/telemetry/data_feature_usage.go @@ -37,15 +37,16 @@ type featureUsage struct { Txn *TxnUsage `json:"txn"` // cluster index usage information // key is the first 6 characters of sha2(TABLE_NAME, 256) - ClusterIndex *ClusterIndexUsage `json:"clusterIndex"` - NewClusterIndex *NewClusterIndexUsage `json:"newClusterIndex"` - TemporaryTable bool `json:"temporaryTable"` - CTE *m.CTEUsageCounter `json:"cte"` - CachedTable bool `json:"cachedTable"` - AutoCapture bool `json:"autoCapture"` - PlacementPolicyUsage *placementPolicyUsage `json:"placementPolicy"` - NonTransactionalUsage *m.NonTransactionalStmtCounter `json:"nonTransactional"` - GlobalKill bool `json:"globalKill"` + ClusterIndex *ClusterIndexUsage `json:"clusterIndex"` + NewClusterIndex *NewClusterIndexUsage `json:"newClusterIndex"` + TemporaryTable bool `json:"temporaryTable"` + CTE *m.CTEUsageCounter `json:"cte"` + CachedTable bool `json:"cachedTable"` + AutoCapture bool `json:"autoCapture"` + PlacementPolicyUsage *placementPolicyUsage `json:"placementPolicy"` + NonTransactionalUsage *m.NonTransactionalStmtCounter `json:"nonTransactional"` + GlobalKill bool `json:"globalKill"` + MultiSchemaChange *m.MultiSchemaChangeUsageCounter `json:"multiSchemaChange"` } type placementPolicyUsage struct { @@ -56,23 +57,25 @@ type placementPolicyUsage struct { NumPartitionWithExplicitPolicies uint64 `json:"numPartitionWithExplicitPolicies"` } -func getFeatureUsage(ctx sessionctx.Context) (*featureUsage, error) { +func getFeatureUsage(ctx context.Context, sctx sessionctx.Context) (*featureUsage, error) { var usage featureUsage var err error - usage.NewClusterIndex, usage.ClusterIndex, err = getClusterIndexUsageInfo(ctx) + usage.NewClusterIndex, usage.ClusterIndex, err = getClusterIndexUsageInfo(ctx, sctx) if err != nil { logutil.BgLogger().Info(err.Error()) return nil, err } // transaction related feature - usage.Txn = getTxnUsageInfo(ctx) + 
usage.Txn = getTxnUsageInfo(sctx) usage.CTE = getCTEUsageInfo() - usage.AutoCapture = getAutoCaptureUsageInfo(ctx) + usage.MultiSchemaChange = getMultiSchemaChangeUsageInfo() - collectFeatureUsageFromInfoschema(ctx, &usage) + usage.AutoCapture = getAutoCaptureUsageInfo(sctx) + + collectFeatureUsageFromInfoschema(sctx, &usage) usage.NonTransactionalUsage = getNonTransactionalUsage() @@ -142,12 +145,12 @@ type NewClusterIndexUsage struct { } // getClusterIndexUsageInfo gets the ClusterIndex usage information. It's exported for future test. -func getClusterIndexUsageInfo(ctx sessionctx.Context) (ncu *NewClusterIndexUsage, cu *ClusterIndexUsage, err error) { +func getClusterIndexUsageInfo(ctx context.Context, sctx sessionctx.Context) (ncu *NewClusterIndexUsage, cu *ClusterIndexUsage, err error) { var newUsage NewClusterIndexUsage - exec := ctx.(sqlexec.RestrictedSQLExecutor) + exec := sctx.(sqlexec.RestrictedSQLExecutor) // query INFORMATION_SCHEMA.tables to get the latest table information about ClusterIndex - rows, _, err := exec.ExecRestrictedSQL(context.TODO(), nil, ` + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, ` SELECT TIDB_PK_TYPE FROM information_schema.tables WHERE table_schema not in ('INFORMATION_SCHEMA', 'METRICS_SCHEMA', 'PERFORMANCE_SCHEMA', 'mysql')`) @@ -168,7 +171,7 @@ func getClusterIndexUsageInfo(ctx sessionctx.Context) (ncu *NewClusterIndexUsage } }() - err = ctx.RefreshTxnCtx(context.TODO()) + err = sctx.RefreshTxnCtx(ctx) if err != nil { return nil, nil, err } @@ -201,6 +204,7 @@ type TxnUsage struct { var initialTxnCommitCounter metrics.TxnCommitCounter var initialCTECounter m.CTEUsageCounter var initialNonTransactionalCounter m.NonTransactionalStmtCounter +var initialMultiSchemaChangeCounter m.MultiSchemaChangeUsageCounter // getTxnUsageInfo gets the usage info of transaction related features. It's exported for tests. func getTxnUsageInfo(ctx sessionctx.Context) *TxnUsage { @@ -233,7 +237,6 @@ func postReportTxnUsage() { initialTxnCommitCounter = metrics.GetTxnCommitCounter() } -// ResetCTEUsage resets CTE usages. 
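The new multi-schema-change usage metric defined just below follows the same snapshot-and-diff pattern as the existing txn and CTE counters: keep a package-level snapshot of the counter, report only the delta accumulated since the last report, and advance the snapshot once the report is posted. A reduced sketch of that pattern with a hypothetical counter type (the real code reads m.GetMultiSchemaCounter from the metrics package):

package main

import "fmt"

// counter is a hypothetical stand-in for metrics counters such as
// m.MultiSchemaChangeUsageCounter.
type counter struct{ Used int64 }

func (c counter) Sub(rhs counter) counter { return counter{Used: c.Used - rhs.Used} }

var (
    current counter // in TiDB this is read from the metrics system
    initial counter // snapshot taken when the previous report was posted
)

// getUsage returns only what accumulated since the last report.
func getUsage() counter { return current.Sub(initial) }

// postReport advances the snapshot after a successful report.
func postReport() { initial = current }

func main() {
    current.Used = 5
    fmt.Println(getUsage().Used) // 5
    postReport()
    current.Used = 7
    fmt.Println(getUsage().Used) // 2
}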
func postReportCTEUsage() { initialCTECounter = m.GetCTECounter() } @@ -245,6 +248,16 @@ func getCTEUsageInfo() *m.CTEUsageCounter { return &diff } +func postReportMultiSchemaChangeUsage() { + initialMultiSchemaChangeCounter = m.GetMultiSchemaCounter() +} + +func getMultiSchemaChangeUsageInfo() *m.MultiSchemaChangeUsageCounter { + curr := m.GetMultiSchemaCounter() + diff := curr.Sub(initialMultiSchemaChangeCounter) + return &diff +} + // getAutoCaptureUsageInfo gets the 'Auto Capture' usage func getAutoCaptureUsageInfo(ctx sessionctx.Context) bool { if val, err := variable.GetGlobalSystemVar(ctx.GetSessionVars(), variable.TiDBCapturePlanBaseline); err == nil { diff --git a/telemetry/data_feature_usage_test.go b/telemetry/data_feature_usage_test.go index 58198b5083d84..d89928bbcc19d 100644 --- a/telemetry/data_feature_usage_test.go +++ b/telemetry/data_feature_usage_test.go @@ -127,6 +127,40 @@ func TestCachedTable(t *testing.T) { require.False(t, usage.CachedTable) } +func TestMultiSchemaChange(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + usage, err := telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.Equal(t, int64(0), usage.MultiSchemaChange.MultiSchemaChangeUsed) + + tk.MustExec("drop table if exists tele_multi_t") + tk.MustExec("create table tele_multi_t(id int)") + tk.MustExec("alter table tele_multi_t add column b int") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.Equal(t, int64(0), usage.MultiSchemaChange.MultiSchemaChangeUsed) + + tk.MustExec("alter table tele_multi_t add column c int, drop column b") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.Equal(t, int64(1), usage.MultiSchemaChange.MultiSchemaChangeUsed) + + tk.MustExec("alter table tele_multi_t add column b int, drop column c") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.Equal(t, int64(2), usage.MultiSchemaChange.MultiSchemaChangeUsed) + + tk.MustExec("alter table tele_multi_t drop column b") + usage, err = telemetry.GetFeatureUsage(tk.Session()) + require.NoError(t, err) + require.Equal(t, int64(2), usage.MultiSchemaChange.MultiSchemaChangeUsed) +} + func TestPlacementPolicies(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/telemetry/data_slow_query.go b/telemetry/data_slow_query.go index e32940e09be6a..7b1b9ef00d440 100644 --- a/telemetry/data_slow_query.go +++ b/telemetry/data_slow_query.go @@ -62,8 +62,8 @@ var ( slowQueryLock sync.Mutex ) -func getSlowQueryStats(ctx sessionctx.Context) (*slowQueryStats, error) { - slowQueryBucket, err := getSlowQueryBucket(ctx) +func getSlowQueryStats(ctx context.Context, sctx sessionctx.Context) (*slowQueryStats, error) { + slowQueryBucket, err := getSlowQueryBucket(sctx) if err != nil { logutil.BgLogger().Info(err.Error()) return nil, err @@ -73,9 +73,9 @@ func getSlowQueryStats(ctx sessionctx.Context) (*slowQueryStats, error) { } // getSlowQueryBucket generates the delta SlowQueryBucket to report -func getSlowQueryBucket(ctx sessionctx.Context) (*SlowQueryBucket, error) { +func getSlowQueryBucket(sctx sessionctx.Context) (*SlowQueryBucket, error) { // update currentSQBInfo first, then gen delta - if err := updateCurrentSQB(ctx); err != nil { + if err := updateCurrentSQB(sctx); err != nil { return nil, err } delta := calculateDeltaSQB() @@ -83,7 +83,7 @@ func getSlowQueryBucket(ctx 
sessionctx.Context) (*SlowQueryBucket, error) { } // updateCurrentSQB records current slow query buckets -func updateCurrentSQB(ctx sessionctx.Context) (err error) { +func updateCurrentSQB(sctx sessionctx.Context) (err error) { defer func() { if r := recover(); r != nil { err = pingcapErrors.Errorf(fmt.Sprintln(r)) diff --git a/telemetry/main_test.go b/telemetry/main_test.go index c3d817ebeabee..cee65c94b707e 100644 --- a/telemetry/main_test.go +++ b/telemetry/main_test.go @@ -15,17 +15,24 @@ package telemetry import ( + "context" "testing" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/testkit/testsetup" "go.uber.org/goleak" ) var ( - GetFeatureUsage = getFeatureUsage GetTxnUsageInfo = getTxnUsageInfo ) +func GetFeatureUsage(sctx sessionctx.Context) (*featureUsage, error) { + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) + return getFeatureUsage(ctx, sctx) +} + func TestMain(m *testing.M) { testsetup.SetupForCommonTest() diff --git a/telemetry/telemetry.go b/telemetry/telemetry.go index 3f383b3d426e8..645aad0e90c08 100644 --- a/telemetry/telemetry.go +++ b/telemetry/telemetry.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/logutil" @@ -122,7 +123,8 @@ func reportUsageData(ctx sessionctx.Context, etcdClient *clientv3.Client) (bool, // TODO: We should use the context from domain, so that when request is blocked for a long time it will not // affect TiDB shutdown. - reqCtx, cancel := context.WithTimeout(context.Background(), uploadTimeout) + reqCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnTelemetry) + reqCtx, cancel := context.WithTimeout(reqCtx, uploadTimeout) defer cancel() req, err := http.NewRequestWithContext(reqCtx, "POST", apiEndpoint, bytes.NewReader(rawJSON)) diff --git a/testkit/BUILD.bazel b/testkit/BUILD.bazel index 17034f2e778f5..a75834b392644 100644 --- a/testkit/BUILD.bazel +++ b/testkit/BUILD.bazel @@ -21,6 +21,7 @@ go_library( "//session", "//session/txninfo", "//sessionctx/variable", + "//store/driver", "//store/mockstore", "//types", "//util", @@ -33,6 +34,7 @@ go_library( "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//tikvrpc", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", ], ) diff --git a/testkit/mockstore.go b/testkit/mockstore.go index 9ab3493c3a70c..98acf94b98c58 100644 --- a/testkit/mockstore.go +++ b/testkit/mockstore.go @@ -17,20 +17,42 @@ package testkit import ( + "flag" "testing" "time" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/driver" "github.com/pingcap/tidb/store/mockstore" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" ) +// WithTiKV flag is only used for debugging locally with real tikv cluster. +var WithTiKV = flag.String("with-tikv", "", "address of tikv cluster, if set, running test with real tikv cluster") + // CreateMockStore return a new mock kv.Storage. 
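The context changes repeated throughout this patch all have the same shape: wrap context.Background() with kv.WithInternalSourceType before running internal transactions or restricted SQL, so internally issued requests carry a source label (telemetry, meta, GC, and so on). A rough sketch of the mechanism using a plain context value; the names here are illustrative only, and TiDB's real kv implementation stores a richer request-source structure:

package main

import (
    "context"
    "fmt"
)

// sourceKey is a hypothetical unexported context key.
type sourceKey struct{}

func withInternalSourceType(ctx context.Context, source string) context.Context {
    return context.WithValue(ctx, sourceKey{}, source)
}

// runInNewTxn stands in for kv.RunInNewTxn: downstream code can recover
// the label and attach it to the KV requests it sends.
func runInNewTxn(ctx context.Context, fn func(context.Context) error) error {
    if src, ok := ctx.Value(sourceKey{}).(string); ok {
        fmt.Println("internal txn tagged with source:", src)
    }
    return fn(ctx)
}

func main() {
    ctx := withInternalSourceType(context.Background(), "telemetry")
    _ = runInNewTxn(ctx, func(context.Context) error { return nil })
}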
func CreateMockStore(t testing.TB, opts ...mockstore.MockTiKVStoreOption) (store kv.Storage, clean func()) { + if *WithTiKV != "" { + var d driver.TiKVDriver + var err error + store, err = d.Open("tikv://" + *WithTiKV) + require.NoError(t, err) + + var dom *domain.Domain + dom, err = session.BootstrapSession(store) + clean = func() { + dom.Close() + err := store.Close() + require.NoError(t, err) + } + require.NoError(t, err) + return + } + store, _, clean = CreateMockStoreAndDomain(t, opts...) return } diff --git a/testkit/result.go b/testkit/result.go index ce8d011a45b03..0f7ad0ce53cbc 100644 --- a/testkit/result.go +++ b/testkit/result.go @@ -19,11 +19,11 @@ package testkit import ( "bytes" "fmt" - "sort" "strings" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) // Result is the result returned by MustQuery. @@ -66,9 +66,7 @@ func Rows(args ...string) [][]interface{} { // Sort sorts and return the result. func (res *Result) Sort() *Result { - sort.Slice(res.rows, func(i, j int) bool { - a := res.rows[i] - b := res.rows[j] + slices.SortFunc(res.rows, func(a, b []string) bool { for i := range a { if a[i] < b[i] { return true diff --git a/testkit/testdata/testdata.go b/testkit/testdata/testdata.go index 851a9924153b9..8bc7f101f5d6f 100644 --- a/testkit/testdata/testdata.go +++ b/testkit/testdata/testdata.go @@ -83,6 +83,7 @@ func loadTestSuiteData(dir, suiteName string) (res TestData, err error) { } func loadTestSuiteCases(filePath string) (res []testCases, err error) { + //nolint: gosec jsonFile, err := os.Open(filePath) if err != nil { return res, err diff --git a/testkit/testfork/BUILD.bazel b/testkit/testfork/BUILD.bazel index 743bd70da5b0d..89d1e33ac65a5 100644 --- a/testkit/testfork/BUILD.bazel +++ b/testkit/testfork/BUILD.bazel @@ -15,4 +15,5 @@ go_test( name = "testfork_test", srcs = ["fork_test.go"], embed = [":testfork"], + deps = ["@com_github_stretchr_testify//require"], ) diff --git a/tests/realtikvtest/sessiontest/BUILD.bazel b/tests/realtikvtest/sessiontest/BUILD.bazel index b0e5afb4dc0fd..14923fa0fa623 100644 --- a/tests/realtikvtest/sessiontest/BUILD.bazel +++ b/tests/realtikvtest/sessiontest/BUILD.bazel @@ -2,6 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "sessiontest_test", + timeout = "short", srcs = [ "main_test.go", "retry_test.go", diff --git a/tests/realtikvtest/sessiontest/session_test.go b/tests/realtikvtest/sessiontest/session_test.go index 9262ebe196498..33a36053dedb8 100644 --- a/tests/realtikvtest/sessiontest/session_test.go +++ b/tests/realtikvtest/sessiontest/session_test.go @@ -3728,3 +3728,29 @@ func TestBinaryReadOnly(t *testing.T) { require.Equal(t, 2, session.GetHistory(tk.Session()).Count()) tk.MustExec("commit") } + +func TestIndexMergeRuntimeStats(t *testing.T) { + store, clean := realtikvtest.CreateMockStoreAndSetup(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_index_merge = 1") + tk.MustExec("create table t1(id int primary key, a int, b int, c int, d int)") + tk.MustExec("create index t1a on t1(a)") + tk.MustExec("create index t1b on t1(b)") + tk.MustExec("insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5)") + rows := tk.MustQuery("explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;").Rows() + require.Len(t, rows, 4) + explain := fmt.Sprintf("%v", rows[0]) + pattern := ".*time:.*loops:.*index_task:{fetch_handle:.*, 
merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*" + require.Regexp(t, pattern, explain) + tableRangeExplain := fmt.Sprintf("%v", rows[1]) + indexExplain := fmt.Sprintf("%v", rows[2]) + tableExplain := fmt.Sprintf("%v", rows[3]) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", tableRangeExplain) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", indexExplain) + require.Regexp(t, ".*time:.*loops:.*cop_task:.*", tableExplain) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustQuery("select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a").Check(testkit.Rows("1 1 1 1 1", "5 5 5 5 5")) +} diff --git a/tests/realtikvtest/sessiontest/temporary_table_test.go b/tests/realtikvtest/sessiontest/temporary_table_test.go index 6eb2ceddb5d3c..67f94d381fbf7 100644 --- a/tests/realtikvtest/sessiontest/temporary_table_test.go +++ b/tests/realtikvtest/sessiontest/temporary_table_test.go @@ -321,7 +321,7 @@ func TestTemporaryTableInterceptor(t *testing.T) { for _, initFunc := range initTxnFuncs { require.NoError(t, initFunc()) - require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO())) + require.NoError(t, sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO(), nil)) txn, err := tk.Session().Txn(true) require.NoError(t, err) @@ -337,7 +337,7 @@ func TestTemporaryTableInterceptor(t *testing.T) { } // Also check GetSnapshotWithTS - snap := tk.Session().GetSnapshotWithTS(0) + snap := sessiontxn.GetSnapshotWithTS(tk.Session(), 0) val, err := snap.Get(context.Background(), k) require.NoError(t, err) require.Equal(t, []byte("v1"), val) diff --git a/tests/realtikvtest/txntest/txn_state_test.go b/tests/realtikvtest/txntest/txn_state_test.go index f0e51027d6fce..46c55c552dc07 100644 --- a/tests/realtikvtest/txntest/txn_state_test.go +++ b/tests/realtikvtest/txntest/txn_state_test.go @@ -58,7 +58,7 @@ func TestBasicTxnState(t *testing.T) { info = tk.Session().TxnInfo() _, expectedDigest := parser.NormalizeDigest("select * from t for update;") require.Equal(t, expectedDigest.String(), info.CurrentSQLDigest) - require.Equal(t, txninfo.TxnLockWaiting, info.State) + require.Equal(t, txninfo.TxnLockAcquiring, info.State) require.True(t, info.BlockStartTime.Valid) require.Equal(t, startTS, info.StartTS) @@ -182,7 +182,7 @@ func TestBlocked(t *testing.T) { ch <- struct{}{} }() time.Sleep(100 * time.Millisecond) - require.Equal(t, txninfo.TxnLockWaiting, tk2.Session().TxnInfo().State) + require.Equal(t, txninfo.TxnLockAcquiring, tk2.Session().TxnInfo().State) require.NotNil(t, tk2.Session().TxnInfo().BlockStartTime) tk1.MustExec("commit;") <-ch @@ -360,7 +360,7 @@ func TestTxnInfoWithPSProtocol(t *testing.T) { info = tk.Session().TxnInfo() require.Greater(t, info.StartTS, uint64(0)) require.Equal(t, digest2.String(), info.CurrentSQLDigest) - require.Equal(t, txninfo.TxnLockWaiting, info.State) + require.Equal(t, txninfo.TxnLockAcquiring, info.State) require.True(t, info.BlockStartTime.Valid) _, beginDigest := parser.NormalizeDigest("begin pessimistic") require.Equal(t, []string{beginDigest.String(), digest1.String(), digest2.String()}, info.AllSQLDigests) diff --git a/tidb-binlog/pump_client/client_test.go b/tidb-binlog/pump_client/client_test.go index 866c91693ff27..ac287773072ed 100644 --- a/tidb-binlog/pump_client/client_test.go +++ b/tidb-binlog/pump_client/client_test.go @@ -24,8 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/tidb-binlog/node" - binlog 
"github.com/pingcap/tipb/go-binlog" - pb "github.com/pingcap/tipb/go-binlog" + "github.com/pingcap/tipb/go-binlog" "github.com/stretchr/testify/require" "google.golang.org/grpc" ) @@ -36,7 +35,7 @@ var ( ) type testCase struct { - binlogs []*pb.Binlog + binlogs []*binlog.Binlog choosePumps []*PumpStatus setAvliable []bool setNodeID []string @@ -61,7 +60,7 @@ func testSelector(t *testing.T, strategy string) { pump.NodeID = fmt.Sprintf("pump%d", i) pump.State = node.Offline // set pump client to avoid create grpc client. - pump.Client = pb.NewPumpClient(nil) + pump.Client = binlog.NewPumpClient(nil) } for _, pump := range pumps { @@ -71,26 +70,26 @@ func testSelector(t *testing.T, strategy string) { tCase := &testCase{} - tCase.binlogs = []*pb.Binlog{ + tCase.binlogs = []*binlog.Binlog{ { - Tp: pb.BinlogType_Prewrite, + Tp: binlog.BinlogType_Prewrite, StartTs: 1, }, { - Tp: pb.BinlogType_Commit, + Tp: binlog.BinlogType_Commit, StartTs: 1, CommitTs: 2, }, { - Tp: pb.BinlogType_Prewrite, + Tp: binlog.BinlogType_Prewrite, StartTs: 3, }, { - Tp: pb.BinlogType_Commit, + Tp: binlog.BinlogType_Commit, StartTs: 3, CommitTs: 4, }, { - Tp: pb.BinlogType_Prewrite, + Tp: binlog.BinlogType_Prewrite, StartTs: 5, }, { - Tp: pb.BinlogType_Commit, + Tp: binlog.BinlogType_Commit, StartTs: 5, CommitTs: 6, }, @@ -111,12 +110,12 @@ func testSelector(t *testing.T, strategy string) { } for j := 0; j < 10; j++ { - prewriteBinlog := &pb.Binlog{ - Tp: pb.BinlogType_Prewrite, + prewriteBinlog := &binlog.Binlog{ + Tp: binlog.BinlogType_Prewrite, StartTs: int64(j), } - commitBinlog := &pb.Binlog{ - Tp: pb.BinlogType_Commit, + commitBinlog := &binlog.Binlog{ + Tp: binlog.BinlogType_Commit, StartTs: int64(j), } @@ -180,20 +179,20 @@ func TestWriteBinlog(t *testing.T) { clientCon, err := grpc.Dial(cfg.addr, opt, grpc.WithInsecure()) require.NoError(t, err) require.NotNil(t, clientCon) - pumpClient := mockPumpsClient(pb.NewPumpClient(clientCon), true) + pumpClient := mockPumpsClient(binlog.NewPumpClient(clientCon), true) // test binlog size bigger than grpc's MaxRecvMsgSize - blog := &pb.Binlog{ - Tp: pb.BinlogType_Prewrite, + blog := &binlog.Binlog{ + Tp: binlog.BinlogType_Prewrite, PrewriteValue: make([]byte, testMaxRecvMsgSize+1), } err = pumpClient.WriteBinlog(blog) require.Error(t, err) for i := 0; i < 10; i++ { - // test binlog size small than grpc's MaxRecvMsgSize - blog = &pb.Binlog{ - Tp: pb.BinlogType_Prewrite, + // test binlog size smaller than grpc's MaxRecvMsgSize + blog = &binlog.Binlog{ + Tp: binlog.BinlogType_Prewrite, PrewriteValue: make([]byte, 1), } err = pumpClient.WriteBinlog(blog) @@ -204,13 +203,13 @@ func TestWriteBinlog(t *testing.T) { require.Len(t, pumpClient.Pumps.UnAvaliablePumps, 1) // test write commit binlog, will not return error although write binlog failed. - preWriteBinlog := &pb.Binlog{ - Tp: pb.BinlogType_Prewrite, + preWriteBinlog := &binlog.Binlog{ + Tp: binlog.BinlogType_Prewrite, StartTs: 123, PrewriteValue: make([]byte, 1), } - commitBinlog := &pb.Binlog{ - Tp: pb.BinlogType_Commit, + commitBinlog := &binlog.Binlog{ + Tp: binlog.BinlogType_Commit, StartTs: 123, CommitTs: 123, PrewriteValue: make([]byte, 1), @@ -284,14 +283,14 @@ func createMockPumpServer(addr string, mode string, withError bool) (*mockPumpSe server: serv, withError: withError, } - pb.RegisterPumpServer(serv, pump) + binlog.RegisterPumpServer(serv, pump) go serv.Serve(l) return pump, nil } // mockPumpsClient creates a PumpsClient, used for test. 
-func mockPumpsClient(client pb.PumpClient, withBadPump bool) *PumpsClient { +func mockPumpsClient(client binlog.PumpClient, withBadPump bool) *PumpsClient { // add a available pump nodeID1 := "pump-1" pump1 := &PumpStatus{ diff --git a/tools/check/ut.go b/tools/check/ut.go index 84b00d34d9e60..45b969360d245 100644 --- a/tools/check/ut.go +++ b/tools/check/ut.go @@ -125,7 +125,7 @@ func cmdList(args ...string) bool { } exist, err := testBinaryExist(pkg) if err != nil { - fmt.Println("check test binary existance error", err) + fmt.Println("check test binary existence error", err) return false } if !exist { @@ -203,7 +203,7 @@ func cmdRun(args ...string) bool { for _, pkg := range pkgs { exist, err := testBinaryExist(pkg) if err != nil { - fmt.Println("check test binary existance error", err) + fmt.Println("check test binary existence error", err) return false } if !exist { @@ -229,7 +229,7 @@ func cmdRun(args ...string) bool { } exist, err := testBinaryExist(pkg) if err != nil { - fmt.Println("check test binary existance error", err) + fmt.Println("check test binary existence error", err) return false } @@ -254,7 +254,7 @@ func cmdRun(args ...string) bool { } exist, err := testBinaryExist(pkg) if err != nil { - fmt.Println("check test binary existance error", err) + fmt.Println("check test binary existence error", err) return false } if !exist { @@ -353,6 +353,7 @@ func parseCaseListFromFile(fileName string) (map[string]struct{}, error) { if err != nil { return nil, withTrace(err) } + //nolint: errcheck defer f.Close() ret := make(map[string]struct{}) @@ -482,6 +483,7 @@ func collectCoverProfileFile() { fmt.Println("create cover file error:", err) os.Exit(-1) } + //nolint: errcheck defer w.Close() w.WriteString("mode: set\n") @@ -519,6 +521,7 @@ func collectOneCoverProfileFile(result map[string]*cover.Profile, file os.DirEnt fmt.Println("open temp cover file error:", err) os.Exit(-1) } + //nolint: errcheck defer f.Close() profs, err := cover.ParseProfilesFromReader(f) @@ -812,14 +815,14 @@ func (n *numa) testCommand(pkg string, fn string) *exec.Cmd { // it takes a longer when race is enabled. so it is set more timeout value. args = append(args, []string{"-test.timeout", "30m"}...) } - // session.test -test.run TestClusteredPrefixColum - args = append(args, "-test.run", fn) + // session.test -test.run TestClusteredPrefixColum + args = append(args, "-test.run", "^"+fn+"$") return exec.Command(exe, args...) 
} func skipDIR(pkg string) bool { - skipDir := []string{"br", "cmd", "dumpling"} + skipDir := []string{"br", "cmd", "dumpling", "tests"} for _, ignore := range skipDir { if strings.HasPrefix(pkg, ignore) { return true diff --git a/tools/check/xprog.go b/tools/check/xprog.go index f6b1c6357b012..23b8ebffe141e 100644 --- a/tools/check/xprog.go +++ b/tools/check/xprog.go @@ -68,6 +68,7 @@ func getPackageInfo(dir string) string { if err != nil { os.Exit(-1) } + //nolint: errcheck defer f.Close() r := bufio.NewReader(f) diff --git a/types/json/BUILD.bazel b/types/json/BUILD.bazel index 0beccc50e4ce0..f125a1a35c0ad 100644 --- a/types/json/BUILD.bazel +++ b/types/json/BUILD.bazel @@ -15,8 +15,10 @@ go_library( "//parser/terror", "//util/dbterror", "//util/hack", + "//util/kvcache", "//util/stringutil", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", ], ) diff --git a/types/json/binary.go b/types/json/binary.go index dda980a63f8bd..f21c1cb70cbf7 100644 --- a/types/json/binary.go +++ b/types/json/binary.go @@ -21,7 +21,6 @@ import ( "fmt" "math" "reflect" - "sort" "strconv" "strings" "unicode/utf8" @@ -29,6 +28,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/util/hack" + "golang.org/x/exp/slices" ) /* @@ -647,8 +647,8 @@ func appendBinaryObject(buf []byte, x map[string]interface{}) ([]byte, error) { for key, val := range x { fields = append(fields, field{key: key, val: val}) } - sort.Slice(fields, func(i, j int) bool { - return fields[i].key < fields[j].key + slices.SortFunc(fields, func(i, j field) bool { + return i.key < j.key }) for i, field := range fields { keyEntryOff := keyEntryBegin + i*keyEntrySize diff --git a/types/json/binary_functions.go b/types/json/binary_functions.go index 926ba52b8cca0..dc24bdaed4312 100644 --- a/types/json/binary_functions.go +++ b/types/json/binary_functions.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/stringutil" + "golang.org/x/exp/slices" ) // Type returns type of BinaryJSON as string. @@ -858,8 +859,8 @@ func mergePatchBinary(target, patch *BinaryJSON) (result *BinaryJSON, err error) for key := range keyValMap { keys = append(keys, []byte(key)) } - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 + slices.SortFunc(keys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 }) length = len(keys) values := make([]BinaryJSON, 0, len(keys)) @@ -941,8 +942,8 @@ func mergeBinaryObject(objects []BinaryJSON) BinaryJSON { } } } - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 + slices.SortFunc(keys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 }) values := make([]BinaryJSON, len(keys)) for i, key := range keys { diff --git a/types/json/path_expr.go b/types/json/path_expr.go index 5d23c1a16ed3a..cee5f56c33c4b 100644 --- a/types/json/path_expr.go +++ b/types/json/path_expr.go @@ -15,11 +15,15 @@ package json import ( + "math" "regexp" "strconv" "strings" + "sync" "github.com/pingcap/errors" + "github.com/pingcap/tidb/util/hack" + "github.com/pingcap/tidb/util/kvcache" ) /* @@ -92,6 +96,20 @@ type PathExpression struct { flags pathExpressionFlag } +var peCache PathExpressionCache + +type pathExpressionKey string + +func (key pathExpressionKey) Hash() []byte { + return hack.Slice(string(key)) +} + +// PathExpressionCache is a cache for PathExpression. 
+type PathExpressionCache struct { + mu sync.Mutex + cache *kvcache.SimpleLRUCache +} + // popOneLeg returns a pathLeg, and a child PathExpression without that leg. func (pe PathExpression) popOneLeg() (pathLeg, PathExpression) { newPe := PathExpression{ @@ -150,6 +168,22 @@ func (pe PathExpression) ContainsAnyAsterisk() bool { // ParseJSONPathExpr parses a JSON path expression. Returns a PathExpression // object which can be used in JSON_EXTRACT, JSON_SET and so on. func ParseJSONPathExpr(pathExpr string) (pe PathExpression, err error) { + peCache.mu.Lock() + val, ok := peCache.cache.Get(pathExpressionKey(pathExpr)) + if ok { + peCache.mu.Unlock() + return val.(PathExpression), nil + } + peCache.mu.Unlock() + + defer func() { + if err == nil { + peCache.mu.Lock() + peCache.cache.Put(pathExpressionKey(pathExpr), kvcache.Value(pe)) + peCache.mu.Unlock() + } + }() + // Find the position of first '$'. If any no-blank characters in // pathExpr[0: dollarIndex), return an ErrInvalidJSONPath error. dollarIndex := strings.Index(pathExpr, "$") @@ -261,3 +295,7 @@ func (pe PathExpression) String() string { } return s.String() } + +func init() { + peCache.cache = kvcache.NewSimpleLRUCache(1000, 0.1, math.MaxUint64) +} diff --git a/util/admin/BUILD.bazel b/util/admin/BUILD.bazel index b46e0ea03bc7f..d6b829bb4ed2c 100644 --- a/util/admin/BUILD.bazel +++ b/util/admin/BUILD.bazel @@ -28,6 +28,7 @@ go_library( go_test( name = "admin_test", + timeout = "short", srcs = [ "admin_integration_test.go", "main_test.go", diff --git a/util/admin/admin.go b/util/admin/admin.go index dee2df5b209c2..27b889c127fa0 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -45,7 +45,8 @@ type RecordData struct { } func getCount(exec sqlexec.RestrictedSQLExecutor, snapshot uint64, sql string, args ...interface{}) (int64, error) { - rows, _, err := exec.ExecRestrictedSQL(context.Background(), []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(snapshot)}, sql, args...) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnAdmin) + rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(snapshot)}, sql, args...) if err != nil { return 0, errors.Trace(err) } diff --git a/util/arena/BUILD.bazel b/util/arena/BUILD.bazel index 412221d3af4e3..3b7582f4ea356 100644 --- a/util/arena/BUILD.bazel +++ b/util/arena/BUILD.bazel @@ -9,6 +9,7 @@ go_library( go_test( name = "arena_test", + timeout = "short", srcs = [ "arena_test.go", "main_test.go", diff --git a/util/benchdaily/bench_daily.go b/util/benchdaily/bench_daily.go index 32c431159e18e..7722def61cced 100644 --- a/util/benchdaily/bench_daily.go +++ b/util/benchdaily/bench_daily.go @@ -85,6 +85,7 @@ func Run(tests ...func(b *testing.B)) { // readBenchResultFromFile is used by the daily bench test. 
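ParseJSONPathExpr above now memoizes successful parses in a mutex-guarded LRU (kvcache.SimpleLRUCache) keyed by the raw path string, and only caches when parsing succeeded. The same shape reduced to a plain map with no eviction, with a deliberately simplified, hypothetical parse step:

package main

import (
    "fmt"
    "strings"
    "sync"
)

// parsed is a hypothetical stand-in for json.PathExpression.
type parsed struct{ legs []string }

var cache = struct {
    sync.Mutex
    m map[string]parsed
}{m: map[string]parsed{}}

func parsePath(path string) (pe parsed, err error) {
    cache.Lock()
    if v, ok := cache.m[path]; ok {
        cache.Unlock()
        return v, nil // cache hit, skip parsing entirely
    }
    cache.Unlock()

    // Mirror the deferred Put above: only successful parses are cached.
    defer func() {
        if err == nil {
            cache.Lock()
            cache.m[path] = pe
            cache.Unlock()
        }
    }()

    if !strings.HasPrefix(path, "$") {
        return parsed{}, fmt.Errorf("invalid path %q", path)
    }
    return parsed{legs: strings.Split(strings.TrimPrefix(path, "$."), ".")}, nil
}

func main() {
    first, _ := parsePath("$.a.b")  // parsed, then cached
    second, _ := parsePath("$.a.b") // served from the cache
    fmt.Println(first.legs, second.legs)
}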
// nolint: unused, deadcode func readBenchResultFromFile(file string) []BenchResult { + //nolint: gosec f, err := os.Open(file) if err != nil { log.Panic(err) diff --git a/util/collate/BUILD.bazel b/util/collate/BUILD.bazel index d104760eb78f5..3f1781975a1f6 100644 --- a/util/collate/BUILD.bazel +++ b/util/collate/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//util/logutil", "//util/stringutil", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", "@org_golang_x_text//encoding", "@org_uber_go_zap//:zap", ], @@ -32,6 +33,7 @@ go_library( go_test( name = "collate_test", + timeout = "short", srcs = [ "collate_bench_test.go", "collate_test.go", diff --git a/util/collate/collate.go b/util/collate/collate.go index 714a70e0c0a88..69d4c33fdeba1 100644 --- a/util/collate/collate.go +++ b/util/collate/collate.go @@ -15,7 +15,6 @@ package collate import ( - "sort" "sync/atomic" "github.com/pingcap/errors" @@ -25,6 +24,7 @@ import ( "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/logutil" "go.uber.org/zap" + "golang.org/x/exp/slices" ) var ( @@ -256,8 +256,8 @@ func GetSupportedCollations() []*charset.Collation { newSupportedCollations = append(newSupportedCollations, coll) } } - sort.Slice(newSupportedCollations, func(i int, j int) bool { - return newSupportedCollations[i].Name < newSupportedCollations[j].Name + slices.SortFunc(newSupportedCollations, func(i, j *charset.Collation) bool { + return i.Name < j.Name }) return newSupportedCollations } diff --git a/util/column-mapping/column.go b/util/column-mapping/column.go index e276b0004882c..93a1443dc6173 100644 --- a/util/column-mapping/column.go +++ b/util/column-mapping/column.go @@ -112,7 +112,7 @@ func (r *Rule) Valid() error { if r.Expression == PartitionID { switch len(r.Arguments) { case 3, 4: - break + return nil default: return errors.NotValidf("arguments %v for patition id", r.Arguments) } diff --git a/util/ddl-checker/BUILD.bazel b/util/ddl-checker/BUILD.bazel index 07ae93eca9980..f364776532653 100644 --- a/util/ddl-checker/BUILD.bazel +++ b/util/ddl-checker/BUILD.bazel @@ -23,6 +23,7 @@ go_library( go_test( name = "ddl-checker_test", + timeout = "short", srcs = ["executable_checker_test.go"], embed = [":ddl-checker"], deps = [ diff --git a/util/deadlockhistory/BUILD.bazel b/util/deadlockhistory/BUILD.bazel index 55b4c6199eeec..da5bff2870aed 100644 --- a/util/deadlockhistory/BUILD.bazel +++ b/util/deadlockhistory/BUILD.bazel @@ -17,6 +17,7 @@ go_library( go_test( name = "deadlockhistory_test", + timeout = "short", srcs = [ "deadlock_history_test.go", "main_test.go", diff --git a/util/disk/BUILD.bazel b/util/disk/BUILD.bazel index 7f28bc941a2b5..84ea6c67d676f 100644 --- a/util/disk/BUILD.bazel +++ b/util/disk/BUILD.bazel @@ -22,6 +22,7 @@ go_library( go_test( name = "disk_test", + timeout = "short", srcs = [ "main_test.go", "tempDir_test.go", diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go index 5805b4e08bc70..170c5fe4516e0 100644 --- a/util/execdetails/execdetails.go +++ b/util/execdetails/execdetails.go @@ -18,7 +18,6 @@ import ( "bytes" "fmt" "math" - "sort" "strconv" "strings" "sync" @@ -153,7 +152,7 @@ func (d ExecDetails) String() string { parts = append(parts, BackoffTypesStr+": "+fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes)) } commitDetails.Mu.Unlock() - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) if resolveLockTime > 0 { parts = append(parts, 
ResolveLockTimeStr+": "+strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64)) } @@ -245,7 +244,7 @@ func (d ExecDetails) ToZapFields() (fields []zap.Field) { fields = append(fields, zap.String("backoff_types", fmt.Sprintf("%v", commitDetails.Mu.BackoffTypes))) } commitDetails.Mu.Unlock() - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) if resolveLockTime > 0 { fields = append(fields, zap.String("resolve_lock_time", fmt.Sprintf("%v", strconv.FormatFloat(time.Duration(resolveLockTime).Seconds(), 'f', -1, 64)+"s"))) } @@ -862,9 +861,9 @@ func (e *RuntimeStatsWithCommit) String() string { buf.WriteString("}") } e.Commit.Mu.Unlock() - if e.Commit.ResolveLockTime > 0 { + if e.Commit.ResolveLock.ResolveLockTime > 0 { buf.WriteString(", resolve_lock: ") - buf.WriteString(FormatDuration(time.Duration(e.Commit.ResolveLockTime))) + buf.WriteString(FormatDuration(time.Duration(e.Commit.ResolveLock.ResolveLockTime))) } prewriteRegionNum := atomic.LoadInt32(&e.Commit.PrewriteRegionNum) @@ -903,9 +902,9 @@ func (e *RuntimeStatsWithCommit) String() string { buf.WriteString(", keys:") buf.WriteString(strconv.FormatInt(int64(e.LockKeys.LockKeys), 10)) } - if e.LockKeys.ResolveLockTime > 0 { + if e.LockKeys.ResolveLock.ResolveLockTime > 0 { buf.WriteString(", resolve_lock:") - buf.WriteString(FormatDuration(time.Duration(e.LockKeys.ResolveLockTime))) + buf.WriteString(FormatDuration(time.Duration(e.LockKeys.ResolveLock.ResolveLockTime))) } if e.LockKeys.BackoffTime > 0 { buf.WriteString(", backoff: {time: ") @@ -949,7 +948,7 @@ func (e *RuntimeStatsWithCommit) formatBackoff(backoffTypes []string) string { tpMap[tpStr] = struct{}{} tpArray = append(tpArray, tpStr) } - sort.Strings(tpArray) + slices.Sort(tpArray) return fmt.Sprintf("%v", tpArray) } diff --git a/util/execdetails/execdetails_test.go b/util/execdetails/execdetails_test.go index 190d42322e4da..b13a09725c942 100644 --- a/util/execdetails/execdetails_test.go +++ b/util/execdetails/execdetails_test.go @@ -47,11 +47,13 @@ func TestString(t *testing.T) { "backoff2", }, }, - ResolveLockTime: 1000000000, // 10^9 ns = 1s WriteKeys: 1, WriteSize: 1, PrewriteRegionNum: 1, TxnRetry: 1, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 1000000000, // 10^9 ns = 1s + }, }, ScanDetail: &util.ScanDetail{ ProcessedKeys: 10, @@ -188,11 +190,13 @@ func TestRuntimeStatsWithCommit(t *testing.T) { CommitBackoffTime: int64(time.Second), BackoffTypes: []string{"backoff1", "backoff2", "backoff1"}, }, - ResolveLockTime: int64(time.Second), WriteKeys: 3, WriteSize: 66, PrewriteRegionNum: 5, TxnRetry: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: int64(time.Second), + }, } stats := &RuntimeStatsWithCommit{ Commit: commitDetail, @@ -201,11 +205,10 @@ func TestRuntimeStatsWithCommit(t *testing.T) { require.Equal(t, expect, stats.String()) lockDetail := &util.LockKeysDetails{ - TotalTime: time.Second, - RegionNum: 2, - LockKeys: 10, - ResolveLockTime: int64(time.Second * 2), - BackoffTime: int64(time.Second * 3), + TotalTime: time.Second, + RegionNum: 2, + LockKeys: 10, + BackoffTime: int64(time.Second * 3), Mu: struct { sync.Mutex BackoffTypes []string @@ -217,6 +220,9 @@ func TestRuntimeStatsWithCommit(t *testing.T) { LockRPCTime: int64(time.Second * 5), LockRPCCount: 50, RetryCount: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: int64(time.Second * 2), + }, } stats = &RuntimeStatsWithCommit{ LockKeys: lockDetail, 
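The execdetails edits track a client-go restructuring: ResolveLockTime is no longer a flat field on the commit and lock-keys details but lives in a nested ResolveLockDetail struct, so both the formatters and the tests now reach through .ResolveLock. A sketch of reading the nested field, using local stand-in types rather than the real client-go ones:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// resolveLockDetail and commitDetails are hypothetical stand-ins for the
// client-go util.ResolveLockDetail and util.CommitDetails structs.
type resolveLockDetail struct {
    ResolveLockTime int64 // nanoseconds
}

type commitDetails struct {
    ResolveLock resolveLockDetail
}

func main() {
    d := &commitDetails{ResolveLock: resolveLockDetail{ResolveLockTime: int64(time.Second)}}
    // Readers now load through the nested struct instead of a flat field.
    if ns := atomic.LoadInt64(&d.ResolveLock.ResolveLockTime); ns > 0 {
        fmt.Println("resolve_lock:", time.Duration(ns)) // resolve_lock: 1s
    }
}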
diff --git a/util/expensivequery/BUILD.bazel b/util/expensivequery/BUILD.bazel index 993d7fdc3e75b..21576dc13d95b 100644 --- a/util/expensivequery/BUILD.bazel +++ b/util/expensivequery/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//util/logutil", "//util/memory", "@com_github_pingcap_log//:log", + "@org_golang_x_exp//slices", "@org_uber_go_zap//:zap", "@org_uber_go_zap//zapcore", ], diff --git a/util/expensivequery/memory_usage_alarm.go b/util/expensivequery/memory_usage_alarm.go index 391893e928bdd..fab62ea9bf1f5 100644 --- a/util/expensivequery/memory_usage_alarm.go +++ b/util/expensivequery/memory_usage_alarm.go @@ -20,7 +20,6 @@ import ( "path/filepath" "runtime" rpprof "runtime/pprof" - "sort" "strings" "time" @@ -31,6 +30,7 @@ import ( "github.com/pingcap/tidb/util/memory" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/slices" ) type memoryUsageAlarm struct { @@ -183,8 +183,8 @@ func (record *memoryUsageAlarm) recordSQL(sm util.SessionManager) { logutil.BgLogger().Error("close oom record file fail", zap.Error(err)) } }() - printTop10 := func(cmp func(i, j int) bool) { - sort.Slice(pinfo, cmp) + printTop10 := func(cmp func(i, j *util.ProcessInfo) bool) { + slices.SortFunc(pinfo, cmp) list := pinfo if len(list) > 10 { list = list[:10] @@ -210,13 +210,13 @@ func (record *memoryUsageAlarm) recordSQL(sm util.SessionManager) { } _, err = f.WriteString("The 10 SQLs with the most memory usage for OOM analysis\n") - printTop10(func(i, j int) bool { - return pinfo[i].StmtCtx.MemTracker.MaxConsumed() > pinfo[j].StmtCtx.MemTracker.MaxConsumed() + printTop10(func(i, j *util.ProcessInfo) bool { + return i.StmtCtx.MemTracker.MaxConsumed() > j.StmtCtx.MemTracker.MaxConsumed() }) _, err = f.WriteString("The 10 SQLs with the most time usage for OOM analysis\n") - printTop10(func(i, j int) bool { - return pinfo[i].Time.Before(pinfo[j].Time) + printTop10(func(i, j *util.ProcessInfo) bool { + return i.Time.Before(j.Time) }) } diff --git a/util/gcutil/BUILD.bazel b/util/gcutil/BUILD.bazel index f56fff242ab1b..fc6c882078726 100644 --- a/util/gcutil/BUILD.bazel +++ b/util/gcutil/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/util/gcutil", visibility = ["//visibility:public"], deps = [ + "//kv", "//parser/model", "//sessionctx", "//sessionctx/variable", diff --git a/util/gcutil/gcutil.go b/util/gcutil/gcutil.go index 8c60534f9c265..0d3ae7da53ee2 100644 --- a/util/gcutil/gcutil.go +++ b/util/gcutil/gcutil.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -70,9 +71,10 @@ func ValidateSnapshotWithGCSafePoint(snapshotTS, safePointTS uint64) error { } // GetGCSafePoint loads GC safe point time from mysql.tidb. 
-func GetGCSafePoint(ctx sessionctx.Context) (uint64, error) { - exec := ctx.(sqlexec.RestrictedSQLExecutor) - rows, _, err := exec.ExecRestrictedSQL(context.Background(), nil, selectVariableValueSQL, "tikv_gc_safe_point") +func GetGCSafePoint(sctx sessionctx.Context) (uint64, error) { + exec := sctx.(sqlexec.RestrictedSQLExecutor) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnGC) + rows, _, err := exec.ExecRestrictedSQL(ctx, nil, selectVariableValueSQL, "tikv_gc_safe_point") if err != nil { return 0, errors.Trace(err) } diff --git a/util/generatedexpr/gen_expr_test.go b/util/generatedexpr/gen_expr_test.go index 484453466e7eb..6b1d39f2c2490 100644 --- a/util/generatedexpr/gen_expr_test.go +++ b/util/generatedexpr/gen_expr_test.go @@ -18,9 +18,8 @@ import ( "testing" "github.com/pingcap/tidb/parser/ast" - "github.com/stretchr/testify/require" - _ "github.com/pingcap/tidb/types/parser_driver" + "github.com/stretchr/testify/require" ) func TestParseExpression(t *testing.T) { diff --git a/util/logutil/hex_test.go b/util/logutil/hex_test.go index c9ce8713fe7ea..9351b0216cf52 100644 --- a/util/logutil/hex_test.go +++ b/util/logutil/hex_test.go @@ -21,10 +21,9 @@ import ( "testing" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/stretchr/testify/require" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/util/logutil" + "github.com/stretchr/testify/require" ) func TestHex(t *testing.T) { diff --git a/util/mathutil/math.go b/util/mathutil/math.go index 5461a676caa27..85ae503935a8b 100644 --- a/util/mathutil/math.go +++ b/util/mathutil/math.go @@ -68,7 +68,7 @@ func IsFinite(f float64) bool { } // Max returns the largest one from its arguments. -func Max[v constraints.Ordered](x v, xs ...v) v { +func Max[T constraints.Ordered](x T, xs ...T) T { max := x for _, n := range xs { if n > max { @@ -79,7 +79,7 @@ func Max[v constraints.Ordered](x v, xs ...v) v { } // Min returns the smallest one from its arguments. -func Min[v constraints.Ordered](x v, xs ...v) v { +func Min[T constraints.Ordered](x T, xs ...T) T { min := x for _, n := range xs { if n < min { @@ -90,7 +90,7 @@ func Min[v constraints.Ordered](x v, xs ...v) v { } // Clamp restrict a value to a certain interval. 
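Small usage sketch (not part of the patch): with the type parameter constrained by constraints.Ordered, Max, Min, and the relaxed Clamp signature in the hunk just below work for any ordered type, including the string cases the new Clamp tests exercise. The helper here simply mirrors that shape.

package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// clamp mirrors the relaxed Clamp signature below: any Ordered type, not just
// integers and floats.
func clamp[T constraints.Ordered](n, min, max T) T {
	if n >= max {
		return max
	} else if n <= min {
		return min
	}
	return n
}

func main() {
	fmt.Println(clamp(7, 1, 5))          // 5
	fmt.Println(clamp("aa", "ab", "xy")) // ab
	fmt.Println(clamp(2.5, 0.0, 1.0))    // 1
}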
-func Clamp[v constraints.Integer | constraints.Float](n, min, max v) v { +func Clamp[T constraints.Ordered](n, min, max T) T { if n >= max { return max } else if n <= min { diff --git a/util/mathutil/math_test.go b/util/mathutil/math_test.go index 34a4366bc00a7..99ffab416fdd2 100644 --- a/util/mathutil/math_test.go +++ b/util/mathutil/math_test.go @@ -78,4 +78,7 @@ func TestClamp(t *testing.T) { require.Equal(t, float32(1.0), Clamp(float32(0), 1.0, 3.0)) require.Equal(t, 1, Clamp(0, 1, 1)) require.Equal(t, 1, Clamp(100, 1, 1)) + require.Equal(t, "ab", Clamp("aa", "ab", "xy")) + require.Equal(t, "xy", Clamp("yy", "ab", "xy")) + require.Equal(t, "ab", Clamp("ab", "ab", "ab")) } diff --git a/util/memory/BUILD.bazel b/util/memory/BUILD.bazel index 3f55456e383f9..d630240a6b4c6 100644 --- a/util/memory/BUILD.bazel +++ b/util/memory/BUILD.bazel @@ -10,13 +10,13 @@ go_library( importpath = "github.com/pingcap/tidb/util/memory", visibility = ["//visibility:public"], deps = [ - "//config", "//errno", "//metrics", "//parser/terror", "//util/dbterror", "//util/logutil", "@com_github_shirou_gopsutil_v3//mem", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], diff --git a/util/memory/tracker.go b/util/memory/tracker.go index 106ff210e83ed..75dba11cea11f 100644 --- a/util/memory/tracker.go +++ b/util/memory/tracker.go @@ -17,16 +17,18 @@ package memory import ( "bytes" "fmt" - "sort" "strconv" "sync" "sync/atomic" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/metrics" atomicutil "go.uber.org/atomic" + "golang.org/x/exp/slices" ) +// TrackMemWhenExceeds is the threshold when memory usage needs to be tracked. +const TrackMemWhenExceeds = 104857600 // 100MB + // Tracker is used to track the memory usage during query execution. // It contains an optional limit and can be arranged into a tree structure // such that the consumption tracked by a Tracker is also tracked by @@ -388,7 +390,7 @@ func (t *Tracker) Consume(bytes int64) { // BufferedConsume is used to buffer memory usage and do late consume func (t *Tracker) BufferedConsume(bufferedMemSize *int64, bytes int64) { *bufferedMemSize += bytes - if *bufferedMemSize > int64(config.TrackMemWhenExceeds) { + if *bufferedMemSize > int64(TrackMemWhenExceeds) { t.Consume(*bufferedMemSize) *bufferedMemSize = int64(0) } @@ -436,7 +438,7 @@ func (t *Tracker) toString(indent string, buffer *bytes.Buffer) { for label := range t.mu.children { labels = append(labels, label) } - sort.Ints(labels) + slices.Sort(labels) for _, label := range labels { children := t.mu.children[label] for _, child := range children { diff --git a/util/mock/context.go b/util/mock/context.go index 19a67cd3e104e..b8be5b9ddaaf9 100644 --- a/util/mock/context.go +++ b/util/mock/context.go @@ -56,12 +56,17 @@ type Context struct { sm util.SessionManager pcache *kvcache.SimpleLRUCache level kvrpcpb.DiskFullOpt + is sessionctx.InfoschemaMetaVersion } type wrapTxn struct { kv.Transaction } +func (txn *wrapTxn) Wait(_ context.Context, _ sessionctx.Context) (kv.Transaction, error) { + return txn, nil +} + func (txn *wrapTxn) Valid() bool { return txn.Transaction != nil && txn.Transaction.Valid() } @@ -173,12 +178,21 @@ func (c *Context) GetInfoSchema() sessionctx.InfoschemaMetaVersion { return is } } - return nil + if c.is == nil { + c.is = MockInfoschema(nil) + } + return c.is } +// MockInfoschema only serves for test. 
+var MockInfoschema func(tbList []*model.TableInfo) sessionctx.InfoschemaMetaVersion + // GetDomainInfoSchema returns the latest information schema in domain func (c *Context) GetDomainInfoSchema() sessionctx.InfoschemaMetaVersion { - return nil + if c.is == nil { + c.is = MockInfoschema(nil) + } + return c.is } // GetBuiltinFunctionUsage implements sessionctx.Context GetBuiltinFunctionUsage interface. @@ -240,11 +254,6 @@ func (c *Context) NewStaleTxnWithStartTS(ctx context.Context, startTS uint64) er return c.NewTxn(ctx) } -// GetSnapshotWithTS return a snapshot with ts -func (c *Context) GetSnapshotWithTS(ts uint64) kv.Snapshot { - return c.Store.GetSnapshot(kv.Version{Ver: ts}) -} - // RefreshTxnCtx implements the sessionctx.Context interface. func (c *Context) RefreshTxnCtx(ctx context.Context) error { return errors.Trace(c.NewTxn(ctx)) @@ -360,9 +369,9 @@ func (c *Context) PrepareTSFuture(ctx context.Context, future oracle.Future, sco return nil } -// GetPreparedTSFuture returns the prepared ts future -func (c *Context) GetPreparedTSFuture() oracle.Future { - return nil +// GetPreparedTxnFuture returns the prepared ts future +func (c *Context) GetPreparedTxnFuture() sessionctx.TxnFuture { + return &c.txn } // GetStmtStats implements the sessionctx.Context interface. diff --git a/util/mock/iter_test.go b/util/mock/iter_test.go index 9e142973185ea..e99e9d17745cb 100644 --- a/util/mock/iter_test.go +++ b/util/mock/iter_test.go @@ -17,7 +17,6 @@ import ( "testing" "github.com/pingcap/tidb/kv" - "github.com/stretchr/testify/assert" ) diff --git a/util/plancodec/id.go b/util/plancodec/id.go index 2b2e5e7e972a2..8b17660bfa589 100644 --- a/util/plancodec/id.go +++ b/util/plancodec/id.go @@ -394,6 +394,8 @@ func PhysicalIDToTypeString(id int) string { return TypeBatchPointGet case typeClusterMemTableReader: return TypeClusterMemTableReader + case typeDataSourceID: + return TypeDataSource case typeLoadDataID: return TypeLoadData case typeTableSampleID: diff --git a/util/plancodec/id_test.go b/util/plancodec/id_test.go index 8a7addd23fdba..fe275d81b2aeb 100644 --- a/util/plancodec/id_test.go +++ b/util/plancodec/id_test.go @@ -87,3 +87,9 @@ func TestPlanIDChanged(t *testing.T) { require.Equal(t, testcase.Expected, testcase.Value) } } + +func TestReverse(t *testing.T) { + for i := 1; i <= 55; i++ { + require.Equal(t, TypeStringToPhysicalID(PhysicalIDToTypeString(i)), i) + } +} diff --git a/util/profile/BUILD.bazel b/util/profile/BUILD.bazel index c541901b7cc37..e868e3200d0c6 100644 --- a/util/profile/BUILD.bazel +++ b/util/profile/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "//util/texttree", "@com_github_google_pprof//profile", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", ], ) diff --git a/util/profile/flamegraph.go b/util/profile/flamegraph.go index b814923479d8b..0de42eb0468f0 100644 --- a/util/profile/flamegraph.go +++ b/util/profile/flamegraph.go @@ -17,11 +17,11 @@ package profile import ( "fmt" "math" - "sort" "github.com/google/pprof/profile" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/texttree" + "golang.org/x/exp/slices" ) type flamegraphNode struct { @@ -93,12 +93,11 @@ func (n *flamegraphNode) sortedChildren() []flamegraphNodeWithLocation { locID: locID, }) } - sort.Slice(children, func(i, j int) bool { - a, b := children[i], children[j] - if a.cumValue != b.cumValue { - return a.cumValue > b.cumValue + slices.SortFunc(children, func(i, j flamegraphNodeWithLocation) bool { + if i.cumValue != j.cumValue { + return i.cumValue > j.cumValue } - 
return a.locID < b.locID + return i.locID < j.locID }) return children diff --git a/util/ranger/BUILD.bazel b/util/ranger/BUILD.bazel index 053085331755e..6f331de00afb7 100644 --- a/util/ranger/BUILD.bazel +++ b/util/ranger/BUILD.bazel @@ -30,6 +30,7 @@ go_library( "//util/collate", "//util/dbterror", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", ], ) diff --git a/util/ranger/ranger.go b/util/ranger/ranger.go index 551cd7fae176e..0aeae0ceca5ac 100644 --- a/util/ranger/ranger.go +++ b/util/ranger/ranger.go @@ -18,7 +18,6 @@ import ( "bytes" "math" "regexp" - "sort" "unicode/utf8" "github.com/pingcap/errors" @@ -35,6 +34,7 @@ import ( driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" + "golang.org/x/exp/slices" ) func validInterval(sctx sessionctx.Context, low, high *point) (bool, error) { @@ -454,8 +454,8 @@ func UnionRanges(sctx sessionctx.Context, ranges []*Range, mergeConsecutive bool } objects = append(objects, &sortRange{originalValue: ran, encodedStart: left, encodedEnd: right}) } - sort.Slice(objects, func(i, j int) bool { - return bytes.Compare(objects[i].encodedStart, objects[j].encodedStart) < 0 + slices.SortFunc(objects, func(i, j *sortRange) bool { + return bytes.Compare(i.encodedStart, j.encodedStart) < 0 }) ranges = ranges[:0] lastRange := objects[0] diff --git a/util/rowDecoder/BUILD.bazel b/util/rowDecoder/BUILD.bazel index 9f6768ad49b44..7abf4578a8074 100644 --- a/util/rowDecoder/BUILD.bazel +++ b/util/rowDecoder/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//types", "//util/chunk", "//util/rowcodec", + "@org_golang_x_exp//slices", ], ) diff --git a/util/rowDecoder/decoder.go b/util/rowDecoder/decoder.go index 0d32f26690a0a..587314c070c52 100644 --- a/util/rowDecoder/decoder.go +++ b/util/rowDecoder/decoder.go @@ -15,7 +15,6 @@ package decoder import ( - "sort" "time" "github.com/pingcap/tidb/expression" @@ -28,6 +27,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/rowcodec" + "golang.org/x/exp/slices" ) // Column contains the info and generated expr of column. @@ -181,7 +181,7 @@ func (rd *RowDecoder) EvalRemainedExprColumnMap(ctx sessionctx.Context, row map[ keys = append(keys, col.Col.Offset) ids[col.Col.Offset] = int(k) } - sort.Ints(keys) + slices.Sort(keys) for _, id := range keys { col := rd.colMap[int64(ids[id])] if col.GenExpr == nil { diff --git a/util/security.go b/util/security.go index b12c53914bcf1..0a958767a50de 100644 --- a/util/security.go +++ b/util/security.go @@ -84,6 +84,7 @@ func ToTLSConfigWithVerify(caPath, certPath, keyPath string, verifyCN []string) // Create a certificate pool from CA certPool := x509.NewCertPool() + //nolint: gosec ca, err := ioutil.ReadFile(caPath) if err != nil { return nil, errors.Annotate(err, "could not read ca certificate") diff --git a/util/sem/sem_test.go b/util/sem/sem_test.go index f7f8d20f6ee89..cb0c47ad9225d 100644 --- a/util/sem/sem_test.go +++ b/util/sem/sem_test.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/variable" - "github.com/stretchr/testify/assert" ) diff --git a/util/sqlexec/restricted_sql_executor.go b/util/sqlexec/restricted_sql_executor.go index 5a0d39361f1ab..2db32927705f9 100644 --- a/util/sqlexec/restricted_sql_executor.go +++ b/util/sqlexec/restricted_sql_executor.go @@ -173,6 +173,9 @@ type Statement interface { // RebuildPlan rebuilds the plan of the statement. 
RebuildPlan(ctx context.Context) (schemaVersion int64, err error) + + // GetStmtNode returns the stmtNode inside Statement + GetStmtNode() ast.StmtNode } // RecordSet is an abstract result set interface to help get data from Plan. diff --git a/util/stmtsummary/BUILD.bazel b/util/stmtsummary/BUILD.bazel index 0b313d255855f..e5c2d2950bf05 100644 --- a/util/stmtsummary/BUILD.bazel +++ b/util/stmtsummary/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//util/set", "@com_github_pingcap_failpoint//:failpoint", "@com_github_tikv_client_go_v2//util", + "@org_golang_x_exp//slices", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", ], diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go index 9271f5c274ace..d0722544f1f22 100644 --- a/util/stmtsummary/statement_summary.go +++ b/util/stmtsummary/statement_summary.go @@ -19,7 +19,6 @@ import ( "container/list" "fmt" "math" - "sort" "strconv" "strings" "sync" @@ -34,6 +33,7 @@ import ( "github.com/pingcap/tidb/util/plancodec" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" + "golang.org/x/exp/slices" ) // stmtSummaryByDigestKey defines key for stmtSummaryByDigestMap.summaryMap. @@ -753,7 +753,7 @@ func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo, intervalSeco if commitDetails.GetCommitTsTime > ssElement.maxGetCommitTsTime { ssElement.maxGetCommitTsTime = commitDetails.GetCommitTsTime } - resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLockTime) + resolveLockTime := atomic.LoadInt64(&commitDetails.ResolveLock.ResolveLockTime) ssElement.sumResolveLockTime += resolveLockTime if resolveLockTime > ssElement.maxResolveLockTime { ssElement.maxResolveLockTime = resolveLockTime @@ -870,8 +870,8 @@ func formatBackoffTypes(backoffMap map[string]int) interface{} { for backoffType, count := range backoffMap { backoffArray = append(backoffArray, backoffStat{backoffType, count}) } - sort.Slice(backoffArray, func(i, j int) bool { - return backoffArray[i].count > backoffArray[j].count + slices.SortFunc(backoffArray, func(i, j backoffStat) bool { + return i.count > j.count }) var buffer bytes.Buffer diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index a84eb8799af07..041de2989241f 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -114,8 +114,8 @@ func TestAddStatement(t *testing.T) { maxLocalLatchTime: stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime, sumCommitBackoffTime: stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, maxCommitBackoffTime: stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - sumResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, - maxResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + sumResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, + maxResolveLockTime: stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, sumWriteKeys: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys), maxWriteKeys: stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, sumWriteSize: int64(stmtExecInfo1.ExecDetail.CommitDetail.WriteSize), @@ -191,11 +191,13 @@ func TestAddStatement(t *testing.T) { CommitBackoffTime: 1000, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 10000, WriteKeys: 100000, WriteSize: 1000000, PrewriteRegionNum: 100, TxnRetry: 10, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 10000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 
6000, @@ -256,8 +258,8 @@ func TestAddStatement(t *testing.T) { expectedSummaryElement.sumCommitBackoffTime += stmtExecInfo2.ExecDetail.CommitDetail.Mu.CommitBackoffTime expectedSummaryElement.maxCommitBackoffTime = stmtExecInfo2.ExecDetail.CommitDetail.Mu.CommitBackoffTime stmtExecInfo2.ExecDetail.CommitDetail.Mu.Unlock() - expectedSummaryElement.sumResolveLockTime += stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime - expectedSummaryElement.maxResolveLockTime = stmtExecInfo2.ExecDetail.CommitDetail.ResolveLockTime + expectedSummaryElement.sumResolveLockTime += stmtExecInfo2.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime + expectedSummaryElement.maxResolveLockTime = stmtExecInfo2.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime expectedSummaryElement.sumWriteKeys += int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys) expectedSummaryElement.maxWriteKeys = stmtExecInfo2.ExecDetail.CommitDetail.WriteKeys expectedSummaryElement.sumWriteSize += int64(stmtExecInfo2.ExecDetail.CommitDetail.WriteSize) @@ -321,11 +323,13 @@ func TestAddStatement(t *testing.T) { CommitBackoffTime: 100, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 1000, WriteKeys: 10000, WriteSize: 100000, PrewriteRegionNum: 10, TxnRetry: 1, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 1000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 600, @@ -370,7 +374,7 @@ func TestAddStatement(t *testing.T) { stmtExecInfo3.ExecDetail.CommitDetail.Mu.Lock() expectedSummaryElement.sumCommitBackoffTime += stmtExecInfo3.ExecDetail.CommitDetail.Mu.CommitBackoffTime stmtExecInfo3.ExecDetail.CommitDetail.Mu.Unlock() - expectedSummaryElement.sumResolveLockTime += stmtExecInfo3.ExecDetail.CommitDetail.ResolveLockTime + expectedSummaryElement.sumResolveLockTime += stmtExecInfo3.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime expectedSummaryElement.sumWriteKeys += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteKeys) expectedSummaryElement.sumWriteSize += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteSize) expectedSummaryElement.sumPrewriteRegionNum += int64(stmtExecInfo3.ExecDetail.CommitDetail.PrewriteRegionNum) @@ -605,11 +609,13 @@ func generateAnyExecInfo() *StmtExecInfo { CommitBackoffTime: 200, BackoffTypes: []string{boTxnLockName}, }, - ResolveLockTime: 2000, WriteKeys: 20000, WriteSize: 200000, PrewriteRegionNum: 20, TxnRetry: 2, + ResolveLock: util.ResolveLockDetail{ + ResolveLockTime: 2000, + }, }, ScanDetail: &util.ScanDetail{ TotalKeys: 1000, @@ -781,7 +787,7 @@ func TestToDatum(t *testing.T) { int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, @@ -829,7 +835,7 @@ func TestToDatum(t *testing.T) { 
int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, - stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLock.ResolveLockTime, int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, diff --git a/util/stringutil/BUILD.bazel b/util/stringutil/BUILD.bazel index bb80478b02804..3d8e0f3433e12 100644 --- a/util/stringutil/BUILD.bazel +++ b/util/stringutil/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//parser/mysql", "//util/hack", "@com_github_pingcap_errors//:errors", + "@org_golang_x_exp//slices", ], ) diff --git a/util/stringutil/string_util.go b/util/stringutil/string_util.go index 1ab9a45cc8ad0..9dbdb33fed27d 100644 --- a/util/stringutil/string_util.go +++ b/util/stringutil/string_util.go @@ -17,13 +17,13 @@ package stringutil import ( "bytes" "fmt" - "sort" "strings" "unicode/utf8" "github.com/pingcap/errors" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/util/hack" + "golang.org/x/exp/slices" ) // ErrSyntax indicates that a value does not have the right syntax for the target type. @@ -346,7 +346,7 @@ func BuildStringFromLabels(labels map[string]string) string { for k := range labels { s = append(s, k) } - sort.Strings(s) + slices.Sort(s) r := new(bytes.Buffer) // visit labels by sorted key in order to make sure that result should be consistency for _, key := range s { diff --git a/util/table-filter/parser.go b/util/table-filter/parser.go index 73cf29c5ac277..122984f95f86d 100644 --- a/util/table-filter/parser.go +++ b/util/table-filter/parser.go @@ -298,6 +298,7 @@ parseLoop: } func (p *matcherParser) importFile(fileName string, parseMatcher func(string, bool) error) error { + //nolint: gosec file, err := os.Open(fileName) if err != nil { return p.annotatef(err, "cannot open filter file") diff --git a/util/topsql/collector/mock/mock.go b/util/topsql/collector/mock/mock.go index 50d44bdca0b86..0320ca2c60aa8 100644 --- a/util/topsql/collector/mock/mock.go +++ b/util/topsql/collector/mock/mock.go @@ -161,7 +161,11 @@ func (c *TopSQLCollector) RegisterSQL(sqlDigest []byte, normalizedSQL string, is } // RegisterPlan uses for testing. 
-func (c *TopSQLCollector) RegisterPlan(planDigest []byte, normalizedPlan string) { +func (c *TopSQLCollector) RegisterPlan(planDigest []byte, normalizedPlan string, isLarge bool) { + if isLarge { + return + } + digestStr := string(hack.String(planDigest)) c.Lock() _, ok := c.planMap[digestStr] diff --git a/util/topsql/reporter/BUILD.bazel b/util/topsql/reporter/BUILD.bazel index 26cedc90a0bcf..04f402018c9a2 100644 --- a/util/topsql/reporter/BUILD.bazel +++ b/util/topsql/reporter/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//config", "//metrics", "//util", + "//util/hack", "//util/logutil", "//util/topsql/collector", "//util/topsql/state", diff --git a/util/topsql/reporter/datamodel.go b/util/topsql/reporter/datamodel.go index 7de6f4e4392e3..747d9ceeae77f 100644 --- a/util/topsql/reporter/datamodel.go +++ b/util/topsql/reporter/datamodel.go @@ -20,6 +20,7 @@ import ( "sync" "sync/atomic" + "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/topsql/collector" topsqlstate "github.com/pingcap/tidb/util/topsql/state" @@ -594,6 +595,13 @@ type sqlMeta struct { isInternal bool } +// planMeta contains a binaryNormalizedPlan and a bool field isLarge to indicate +// whether that binaryNormalizedPlan is too large to decode quickly +type planMeta struct { + binaryNormalizedPlan string + isLarge bool +} + // normalizedSQLMap is a wrapped map used to register normalizedSQL. type normalizedSQLMap struct { data atomic.Value // *sync.Map @@ -654,6 +662,10 @@ func (m *normalizedSQLMap) toProto() []tipb.SQLMeta { // normalizedPlanMap to protobuf representation. type planBinaryDecodeFunc func(string) (string, error) +// planBinaryCompressFunc is used to compress large normalized plan +// into encoded format +type planBinaryCompressFunc func([]byte) string + // normalizedSQLMap is a wrapped map used to register normalizedPlan. type normalizedPlanMap struct { data atomic.Value // *sync.Map @@ -668,13 +680,16 @@ func newNormalizedPlanMap() *normalizedPlanMap { // register saves the relationship between planDigest and normalizedPlan. // If the internal map size exceeds the limit, the relationship will be discarded. -func (m *normalizedPlanMap) register(planDigest []byte, normalizedPlan string) { +func (m *normalizedPlanMap) register(planDigest []byte, normalizedPlan string, isLarge bool) { if m.length.Load() >= topsqlstate.GlobalState.MaxCollect.Load() { ignoreExceedPlanCounter.Inc() return } data := m.data.Load().(*sync.Map) - _, loaded := data.LoadOrStore(string(planDigest), normalizedPlan) + _, loaded := data.LoadOrStore(string(planDigest), planMeta{ + binaryNormalizedPlan: normalizedPlan, + isLarge: isLarge, + }) if !loaded { m.length.Add(1) } @@ -693,18 +708,26 @@ func (m *normalizedPlanMap) take() *normalizedPlanMap { } // toProto converts the normalizedPlanMap to the corresponding protobuf representation. 
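Illustrative sketch only, with local stand-ins rather than the real tipb and sync.Map plumbing: register now remembers whether each plan was flagged as large, and the toProto change just below uses that flag to either decode the plan as before or ship it compressed via planBinaryCompressFunc.

package main

import "fmt"

type planMeta struct {
	binaryNormalizedPlan string
	isLarge              bool
}

type planMetaProto struct {
	PlanDigest            []byte
	NormalizedPlan        string
	EncodedNormalizedPlan string
}

func toProto(digest string, m planMeta,
	decode func(string) (string, error),
	compress func([]byte) string) (planMetaProto, error) {
	out := planMetaProto{PlanDigest: []byte(digest)}
	if m.isLarge {
		// Too large to decode cheaply: report the compressed binary form.
		out.EncodedNormalizedPlan = compress([]byte(m.binaryNormalizedPlan))
		return out, nil
	}
	var err error
	out.NormalizedPlan, err = decode(m.binaryNormalizedPlan)
	return out, err
}

func main() {
	decode := func(s string) (string, error) { return "[decoded] " + s, nil }
	compress := func(b []byte) string { return "[encoded] " + string(b) }

	small, _ := toProto("PLAN-1", planMeta{"PLAN-1", false}, decode, compress)
	large, _ := toProto("PLAN-2", planMeta{"PLAN-2", true}, decode, compress)
	fmt.Println(small.NormalizedPlan, large.EncodedNormalizedPlan)
}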
-func (m *normalizedPlanMap) toProto(decodePlan planBinaryDecodeFunc) []tipb.PlanMeta { +func (m *normalizedPlanMap) toProto(decodePlan planBinaryDecodeFunc, compressPlan planBinaryCompressFunc) []tipb.PlanMeta { metas := make([]tipb.PlanMeta, 0, m.length.Load()) m.data.Load().(*sync.Map).Range(func(k, v interface{}) bool { - planDecoded, errDecode := decodePlan(v.(string)) - if errDecode != nil { - logutil.BgLogger().Warn("[top-sql] decode plan failed", zap.Error(errDecode)) + originalMeta := v.(planMeta) + protoMeta := tipb.PlanMeta{ + PlanDigest: hack.Slice(k.(string)), + } + + var err error + if originalMeta.isLarge { + protoMeta.EncodedNormalizedPlan = compressPlan(hack.Slice(originalMeta.binaryNormalizedPlan)) + } else { + protoMeta.NormalizedPlan, err = decodePlan(originalMeta.binaryNormalizedPlan) + } + if err != nil { + logutil.BgLogger().Warn("[top-sql] decode plan failed", zap.Error(err)) return true } - metas = append(metas, tipb.PlanMeta{ - PlanDigest: []byte(k.(string)), - NormalizedPlan: planDecoded, - }) + + metas = append(metas, protoMeta) return true }) return metas diff --git a/util/topsql/reporter/datamodel_test.go b/util/topsql/reporter/datamodel_test.go index b898fa152260e..82e09bbb07a23 100644 --- a/util/topsql/reporter/datamodel_test.go +++ b/util/topsql/reporter/datamodel_test.go @@ -400,16 +400,22 @@ func Test_normalizedSQLMap_toProto(t *testing.T) { func Test_normalizedPlanMap_register(t *testing.T) { topsqlstate.GlobalState.MaxCollect.Store(2) m := newNormalizedPlanMap() - m.register([]byte("PLAN-1"), "PLAN-1") - m.register([]byte("PLAN-2"), "PLAN-2") - m.register([]byte("PLAN-3"), "PLAN-3") + m.register([]byte("PLAN-1"), "PLAN-1", false) + m.register([]byte("PLAN-2"), "PLAN-2", true) + m.register([]byte("PLAN-3"), "PLAN-3", false) require.Equal(t, int64(2), m.length.Load()) v, ok := m.data.Load().(*sync.Map).Load("PLAN-1") require.True(t, ok) - require.Equal(t, "PLAN-1", v.(string)) + require.Equal(t, planMeta{ + binaryNormalizedPlan: "PLAN-1", + isLarge: false, + }, v.(planMeta)) v, ok = m.data.Load().(*sync.Map).Load("PLAN-2") require.True(t, ok) - require.Equal(t, "PLAN-2", v.(string)) + require.Equal(t, planMeta{ + binaryNormalizedPlan: "PLAN-2", + isLarge: true, + }, v.(planMeta)) _, ok = m.data.Load().(*sync.Map).Load("PLAN-3") require.False(t, ok) } @@ -417,9 +423,9 @@ func Test_normalizedPlanMap_register(t *testing.T) { func Test_normalizedPlanMap_take(t *testing.T) { topsqlstate.GlobalState.MaxCollect.Store(999) m1 := newNormalizedPlanMap() - m1.register([]byte("PLAN-1"), "PLAN-1") - m1.register([]byte("PLAN-2"), "PLAN-2") - m1.register([]byte("PLAN-3"), "PLAN-3") + m1.register([]byte("PLAN-1"), "PLAN-1", false) + m1.register([]byte("PLAN-2"), "PLAN-2", false) + m1.register([]byte("PLAN-3"), "PLAN-3", false) m2 := m1.take() require.Equal(t, int64(0), m1.length.Load()) require.Equal(t, int64(3), m2.length.Load()) @@ -442,26 +448,28 @@ func Test_normalizedPlanMap_take(t *testing.T) { func Test_normalizedPlanMap_toProto(t *testing.T) { topsqlstate.GlobalState.MaxCollect.Store(999) m := newNormalizedPlanMap() - m.register([]byte("PLAN-1"), "PLAN-1") - m.register([]byte("PLAN-2"), "PLAN-2") - m.register([]byte("PLAN-3"), "PLAN-3") - pb := m.toProto(func(s string) (string, error) { return s, nil }) + m.register([]byte("PLAN-1"), "PLAN-1", false) + m.register([]byte("PLAN-2"), "PLAN-2", true) + m.register([]byte("PLAN-3"), "PLAN-3", false) + pb := m.toProto( + func(s string) (string, error) { return "[decoded] " + s, nil }, + func(s []byte) string { return 
"[encoded] " + string(s) }) require.Len(t, pb, 3) hash := map[string]tipb.PlanMeta{} for _, meta := range pb { - hash[meta.NormalizedPlan] = meta + hash[string(meta.PlanDigest)] = meta } require.Equal(t, tipb.PlanMeta{ PlanDigest: []byte("PLAN-1"), - NormalizedPlan: "PLAN-1", + NormalizedPlan: "[decoded] PLAN-1", }, hash["PLAN-1"]) require.Equal(t, tipb.PlanMeta{ - PlanDigest: []byte("PLAN-2"), - NormalizedPlan: "PLAN-2", + PlanDigest: []byte("PLAN-2"), + EncodedNormalizedPlan: "[encoded] PLAN-2", }, hash["PLAN-2"]) require.Equal(t, tipb.PlanMeta{ PlanDigest: []byte("PLAN-3"), - NormalizedPlan: "PLAN-3", + NormalizedPlan: "[decoded] PLAN-3", }, hash["PLAN-3"]) } diff --git a/util/topsql/reporter/reporter.go b/util/topsql/reporter/reporter.go index 501d9bd64b0b2..d4b7cf571b0d1 100644 --- a/util/topsql/reporter/reporter.go +++ b/util/topsql/reporter/reporter.go @@ -51,7 +51,8 @@ type TopSQLReporter interface { RegisterSQL(sqlDigest []byte, normalizedSQL string, isInternal bool) // RegisterPlan like RegisterSQL, but for normalized plan strings. - RegisterPlan(planDigest []byte, normalizedPlan string) + // isLarge indicates the size of normalizedPlan is big. + RegisterPlan(planDigest []byte, normalizedPlan string, isLarge bool) // Close uses to close and release the reporter resource. Close() @@ -80,12 +81,15 @@ type RemoteTopSQLReporter struct { // calling decodePlan this can take a while, so should not block critical paths. decodePlan planBinaryDecodeFunc + + // Instead of dropping large plans, we compress it into encoded format and report + compressPlan planBinaryCompressFunc } // NewRemoteTopSQLReporter creates a new RemoteTopSQLReporter. // // decodePlan is a decoding function which will be called asynchronously to decode the plan binary to string. -func NewRemoteTopSQLReporter(decodePlan planBinaryDecodeFunc) *RemoteTopSQLReporter { +func NewRemoteTopSQLReporter(decodePlan planBinaryDecodeFunc, compressPlan planBinaryCompressFunc) *RemoteTopSQLReporter { ctx, cancel := context.WithCancel(context.Background()) tsr := &RemoteTopSQLReporter{ DefaultDataSinkRegisterer: NewDefaultDataSinkRegisterer(ctx), @@ -99,6 +103,7 @@ func NewRemoteTopSQLReporter(decodePlan planBinaryDecodeFunc) *RemoteTopSQLRepor normalizedPlanMap: newNormalizedPlanMap(), stmtStatsBuffer: map[uint64]stmtstats.StatementStatsMap{}, decodePlan: decodePlan, + compressPlan: compressPlan, } tsr.sqlCPUCollector = collector.NewSQLCPUCollector(tsr) return tsr @@ -153,8 +158,8 @@ func (tsr *RemoteTopSQLReporter) RegisterSQL(sqlDigest []byte, normalizedSQL str // RegisterPlan implements TopSQLReporter. // // This function is thread-safe and efficient. -func (tsr *RemoteTopSQLReporter) RegisterPlan(planDigest []byte, normalizedPlan string) { - tsr.normalizedPlanMap.register(planDigest, normalizedPlan) +func (tsr *RemoteTopSQLReporter) RegisterPlan(planDigest []byte, normalizedPlan string, isLarge bool) { + tsr.normalizedPlanMap.register(planDigest, normalizedPlan, isLarge) } // Close implements TopSQLReporter. 
@@ -270,7 +275,7 @@ func (tsr *RemoteTopSQLReporter) reportWorker() { tsr.doReport(&ReportData{ DataRecords: rs.toProto(), SQLMetas: data.normalizedSQLMap.toProto(), - PlanMetas: data.normalizedPlanMap.toProto(tsr.decodePlan), + PlanMetas: data.normalizedPlanMap.toProto(tsr.decodePlan, tsr.compressPlan), }) case <-tsr.ctx.Done(): return diff --git a/util/topsql/reporter/reporter_test.go b/util/topsql/reporter/reporter_test.go index 7dce8079eeaf3..c57e4ba3ff906 100644 --- a/util/topsql/reporter/reporter_test.go +++ b/util/topsql/reporter/reporter_test.go @@ -44,7 +44,7 @@ func populateCache(tsr *RemoteTopSQLReporter, begin, end int, timestamp uint64) for i := begin; i < end; i++ { key := []byte("planDigest" + strconv.Itoa(i+1)) value := "planNormalized" + strconv.Itoa(i+1) - tsr.RegisterPlan(key, value) + tsr.RegisterPlan(key, value, false) } // collect var records []collector.SQLCPUTimeRecord @@ -63,7 +63,7 @@ func reportCache(tsr *RemoteTopSQLReporter) { tsr.doReport(&ReportData{ DataRecords: tsr.collecting.take().getReportRecords().toProto(), SQLMetas: tsr.normalizedSQLMap.take().toProto(), - PlanMetas: tsr.normalizedPlanMap.take().toProto(tsr.decodePlan), + PlanMetas: tsr.normalizedPlanMap.take().toProto(tsr.decodePlan, tsr.compressPlan), }) } @@ -71,6 +71,10 @@ func mockPlanBinaryDecoderFunc(plan string) (string, error) { return plan, nil } +func mockPlanBinaryCompressFunc(plan []byte) string { + return string(plan) +} + type mockDataSink struct { ch chan *ReportData } @@ -94,7 +98,7 @@ func setupRemoteTopSQLReporter(maxStatementsNum, interval int) (*RemoteTopSQLRep topsqlstate.GlobalState.MaxCollect.Store(10000) topsqlstate.GlobalState.ReportIntervalSeconds.Store(int64(interval)) topsqlstate.EnableTopSQL() - ts := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + ts := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) ds := newMockDataSink2() if err := ts.Register(ds); err != nil { panic(err) @@ -194,7 +198,7 @@ func newSQLCPUTimeRecord(tsr *RemoteTopSQLReporter, sqlID int, cpuTimeMs uint32) key = []byte("planDigest" + strconv.Itoa(sqlID)) value = "planNormalized" + strconv.Itoa(sqlID) - tsr.RegisterPlan(key, value) + tsr.RegisterPlan(key, value, false) return collector.SQLCPUTimeRecord{ SQLDigest: []byte("sqlDigest" + strconv.Itoa(sqlID)), @@ -317,7 +321,7 @@ func TestCollectCapacity(t *testing.T) { for i := 0; i < n; i++ { key := []byte("planDigest" + strconv.Itoa(i)) value := "planNormalized" + strconv.Itoa(i) - tsr.RegisterPlan(key, value) + tsr.RegisterPlan(key, value, false) } } genRecord := func(n int) []collector.SQLCPUTimeRecord { @@ -391,7 +395,7 @@ func TestMultipleDataSinks(t *testing.T) { topsqlstate.GlobalState.ReportIntervalSeconds.Store(1) topsqlstate.EnableTopSQL() - tsr := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + tsr := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) var chs []chan *ReportData for i := 0; i < 7; i++ { @@ -477,7 +481,7 @@ func TestMultipleDataSinks(t *testing.T) { func TestReporterWorker(t *testing.T) { topsqlstate.GlobalState.ReportIntervalSeconds.Store(3) - r := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + r := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) r.Start() defer r.Close() diff --git a/util/topsql/topsql.go b/util/topsql/topsql.go index f416de28ed7f6..61ffca2b0d460 100644 --- a/util/topsql/topsql.go +++ b/util/topsql/topsql.go @@ -45,7 +45,7 @@ var ( ) func init() { - remoteReporter := 
reporter.NewRemoteTopSQLReporter(plancodec.DecodeNormalizedPlan) + remoteReporter := reporter.NewRemoteTopSQLReporter(plancodec.DecodeNormalizedPlan, plancodec.Compress) globalTopSQLReport = remoteReporter singleTargetDataSink = reporter.NewSingleTargetDataSink(remoteReporter) } @@ -182,10 +182,5 @@ func linkSQLTextWithDigest(sqlDigest []byte, normalizedSQL string, isInternal bo } func linkPlanTextWithDigest(planDigest []byte, normalizedBinaryPlan string) { - if len(normalizedBinaryPlan) > MaxBinaryPlanSize { - // ignore the huge size plan - return - } - - globalTopSQLReport.RegisterPlan(planDigest, normalizedBinaryPlan) + globalTopSQLReport.RegisterPlan(planDigest, normalizedBinaryPlan, len(normalizedBinaryPlan) > MaxBinaryPlanSize) } diff --git a/util/topsql/topsql_test.go b/util/topsql/topsql_test.go index 1d9c1ccdda5f9..04855ac163011 100644 --- a/util/topsql/topsql_test.go +++ b/util/topsql/topsql_test.go @@ -86,6 +86,10 @@ func mockPlanBinaryDecoderFunc(plan string) (string, error) { return plan, nil } +func mockPlanBinaryCompressFunc(plan []byte) string { + return string(plan) +} + func TestTopSQLReporter(t *testing.T) { err := cpuprofile.StartCPUProfiler() require.NoError(t, err) @@ -100,7 +104,7 @@ func TestTopSQLReporter(t *testing.T) { }) topsqlstate.EnableTopSQL() - report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) report.Start() ds := reporter.NewSingleTargetDataSink(report) ds.Start() @@ -222,7 +226,7 @@ func TestTopSQLPubSub(t *testing.T) { topsqlstate.GlobalState.ReportIntervalSeconds.Store(1) topsqlstate.EnableTopSQL() - report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) report.Start() defer report.Close() topsql.SetupTopSQLForTest(report) @@ -341,7 +345,7 @@ func TestTopSQLPubSub(t *testing.T) { func TestPubSubWhenReporterIsStopped(t *testing.T) { topsqlstate.EnableTopSQL() - report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc) + report := reporter.NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) report.Start() server, err := mockServer.NewMockPubSubServer()
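Rough sketch with stand-in types and an assumed threshold value: after the topsql.go change above, linkPlanTextWithDigest no longer drops plans larger than MaxBinaryPlanSize; it always registers them and lets the isLarge flag tell the reporter to compress instead of decode.

package main

import "fmt"

const maxBinaryPlanSize = 2 * 1024 * 1024 // assumed value for this sketch only

type planReporter interface {
	RegisterPlan(planDigest []byte, normalizedBinaryPlan string, isLarge bool)
}

type printReporter struct{}

func (printReporter) RegisterPlan(digest []byte, _ string, isLarge bool) {
	fmt.Printf("registered digest=%s large=%v\n", digest, isLarge)
}

// linkPlanTextWithDigest mirrors the patched helper: register unconditionally,
// flagging oversized plans rather than silently ignoring them.
func linkPlanTextWithDigest(r planReporter, planDigest []byte, normalizedBinaryPlan string) {
	r.RegisterPlan(planDigest, normalizedBinaryPlan, len(normalizedBinaryPlan) > maxBinaryPlanSize)
}

func main() {
	linkPlanTextWithDigest(printReporter{}, []byte("digest-1"), "small binary plan")
}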