sessionctx/binloginfo: fix uncomment pre_split_regions ddl-querys in binlog #11762

Merged (14 commits) on Aug 19, 2019
sessionctx/binloginfo/binloginfo.go: 50 changes (42 additions & 8 deletions)

@@ -14,6 +14,7 @@
package binloginfo

import (
"math"
"regexp"
"strings"
"sync"
@@ -28,7 +29,7 @@ import (
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/logutil"
binlog "github.com/pingcap/tipb/go-binlog"
"github.com/pingcap/tipb/go-binlog"
"go.uber.org/zap"
"google.golang.org/grpc"
)
@@ -41,7 +42,9 @@ func init() {
// shared by all sessions.
var pumpsClient *pumpcli.PumpsClient
var pumpsClientLock sync.RWMutex
var shardPat = regexp.MustCompile(`SHARD_ROW_ID_BITS\s*=\s*\d+`)
var shardPat = regexp.MustCompile(`SHARD_ROW_ID_BITS\s*=\s*\d+\s*`)
var preSplitPat = regexp.MustCompile(`PRE_SPLIT_REGIONS\s*=\s*\d+\s*`)
var redundantCommentPat = regexp.MustCompile(` \*\/\s*\/\*!90000`)
Member: Now this variable is redundant.

Contributor Author: Done, thanks.


// BinlogInfo contains binlog data and binlog client.
type BinlogInfo struct {
@@ -132,11 +135,12 @@ func (info *BinlogInfo) WriteBinlog(clusterID uint64) error {

// SetDDLBinlog sets DDL binlog in the kv.Transaction.
func SetDDLBinlog(client *pumpcli.PumpsClient, txn kv.Transaction, jobID int64, ddlQuery string) {
ddlQuery = AddSpecialComment(ddlQuery)
if client == nil {
return
}

ddlQuery = addSpecialComment(ddlQuery)
ddlQuery = AddSpecialComment(ddlQuery)
info := &BinlogInfo{
Data: &binlog.Binlog{
Tp: binlog.BinlogType_Prewrite,
@@ -150,15 +154,45 @@ func SetDDLBinlog(client *pumpcli.PumpsClient, txn kv.Transaction, jobID int64,

const specialPrefix = `/*!90000 `

func addSpecialComment(ddlQuery string) string {
// AddSpecialComment adds a special comment around TiDB-specific table options in a DDL query.
// It is exported for testing.
func AddSpecialComment(ddlQuery string) string {
if strings.Contains(ddlQuery, specialPrefix) {
return ddlQuery
}
loc := shardPat.FindStringIndex(strings.ToUpper(ddlQuery))
if loc == nil {
return ddlQuery
return addSpecialCommentByRegexps(ddlQuery, shardPat, preSplitPat)
}

func addSpecialCommentByRegexps(ddlQuery string, regs ...*regexp.Regexp) string {
Contributor: Please add some comments for this function.

upperQuery := strings.ToUpper(ddlQuery)
var specialComments []string
minIdx := math.MaxInt64
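// Summary of the logic below, added in response to the review request above:
// for every supplied pattern, find its first match in the upper-cased query,
// record the matched option text, remember the earliest match position, and
// strip the match out of both ddlQuery and upperQuery. After the loop, the
// recorded options are re-inserted at that earliest position inside a single
// /*!90000 ... */ special comment, so a downstream MySQL ignores the
// TiDB-only table options while TiDB itself still applies them.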
for _, reg := range regs {
Contributor: For SQL such as "create table t6 (id int ) shard_row_id_bits=2 shard_row_id_bits=2 pre_split_regions=2;", where the same pattern can match more than once, will the collected comments be duplicated here? This should be checked and tested.

Contributor Author: Good catch. Done.

loc := reg.FindStringIndex(upperQuery)
if len(loc) < 2 {
continue
}
specialComments = append(specialComments, ddlQuery[loc[0]:loc[1]])
if loc[0] < minIdx {
minIdx = loc[0]
}
ddlQuery = ddlQuery[:loc[0]] + ddlQuery[loc[1]:]
upperQuery = upperQuery[:loc[0]] + upperQuery[loc[1]:]
}
if minIdx != math.MaxInt64 {
query := ddlQuery[:minIdx] + specialPrefix
for _, comment := range specialComments {
if query[len(query)-1] != ' ' {
query += " "
}
query += comment
}
if query[len(query)-1] != ' ' {
query += " "
}
return query + "*/ " + ddlQuery[minIdx:]
}
return ddlQuery[:loc[0]] + specialPrefix + ddlQuery[loc[0]:loc[1]] + ` */` + ddlQuery[loc[1]:]
return ddlQuery
}

// MockPumpsClient creates a PumpsClient, used for test.
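For context on the specialPrefix constant: MySQL treats /*!NNNNN ... */ as a versioned comment and executes its contents only when the server version is at least NNNNN, so wrapping SHARD_ROW_ID_BITS and PRE_SPLIT_REGIONS in /*!90000 ... */ makes a downstream MySQL skip these TiDB-only options while TiDB still parses them. A minimal usage sketch of the exported helper (the sample DDL is illustrative and the import path assumes the upstream module layout; neither is part of this diff):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/binloginfo"
)

func main() {
	ddl := "create table t1 (id int ) shard_row_id_bits=2 pre_split_regions=2;"
	// Both TiDB-only options end up inside one versioned comment, e.g.:
	// create table t1 (id int ) /*!90000 shard_row_id_bits=2 pre_split_regions=2 */ ;
	fmt.Println(binloginfo.AddSpecialComment(ddl))
}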
sessionctx/binloginfo/binloginfo_test.go: 33 changes (33 additions & 0 deletions)

@@ -431,3 +431,36 @@ func (s *testBinlogSuite) TestDeleteSchema(c *C) {
tk.MustExec("delete from b1 where job_id in (select job_id from b2 where batch_class = 'TEST') or split_job_id in (select job_id from b2 where batch_class = 'TEST');")
tk.MustExec("delete b1 from b2 right join b1 on b1.job_id = b2.job_id and batch_class = 'TEST';")
}

func (s *testBinlogSuite) TestAddSpecialComment(c *C) {
testCase := []struct {
input string
result string
}{
{
"create table t1 (id int ) shard_row_id_bits=2;",
"create table t1 (id int ) /*!90000 shard_row_id_bits=2 */ ;",
},
{
"create table t1 (id int ) shard_row_id_bits=2 pre_split_regions=2;",
"create table t1 (id int ) /*!90000 shard_row_id_bits=2 pre_split_regions=2 */ ;",
},
{
"create table t1 (id int ) shard_row_id_bits=2 pre_split_regions=2;",
"create table t1 (id int ) /*!90000 shard_row_id_bits=2 pre_split_regions=2 */ ;",
},

{
"create table t1 (id int ) shard_row_id_bits=2 engine=innodb pre_split_regions=2;",
"create table t1 (id int ) /*!90000 shard_row_id_bits=2 pre_split_regions=2 */ engine=innodb ;",
},
{
"create table t1 (id int ) pre_split_regions=2 shard_row_id_bits=2;",
"create table t1 (id int ) /*!90000 shard_row_id_bits=2 pre_split_regions=2 */ ;",
},
}
for _, ca := range testCase {
re := binloginfo.AddSpecialComment(ca.input)
c.Assert(re, Equals, ca.result)
}
}
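The review thread above asked what happens when the same option appears twice in a statement, e.g. shard_row_id_bits=2 shard_row_id_bits=2 pre_split_regions=2. The expected output string for that case is not shown in this diff, so the sketch below (written in plain go-test style rather than the gocheck style used in this file; the test name is mine) only checks two invariants that should hold however the duplicates are normalized: exactly one /*!90000 ... */ block is emitted, and re-running the helper on an already-commented query is a no-op.

package binloginfo_test

import (
	"strings"
	"testing"

	"github.com/pingcap/tidb/sessionctx/binloginfo"
)

func TestAddSpecialCommentDuplicatedOption(t *testing.T) {
	input := "create table t6 (id int ) shard_row_id_bits=2 shard_row_id_bits=2 pre_split_regions=2;"
	re := binloginfo.AddSpecialComment(input)
	// However the repeated option is handled, there must be a single special
	// comment block in the result.
	if strings.Count(re, "/*!90000") != 1 {
		t.Fatalf("expected exactly one special comment block, got %q", re)
	}
	// AddSpecialComment returns early when the prefix is already present, so a
	// second call must leave the query unchanged.
	if again := binloginfo.AddSpecialComment(re); again != re {
		t.Fatalf("expected a no-op on an already-commented query, got %q", again)
	}
}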