store/tikv: drop the unreachable store's regions from cache. #2792
```diff
@@ -202,7 +202,7 @@ func (s *testRegionCacheSuite) TestSplit(c *C) {
 }
 
 func (s *testRegionCacheSuite) TestMerge(c *C) {
-	// ['' - 'm' - 'z']
+	// key range: ['' - 'm' - 'z']
 	region2 := s.cluster.AllocID()
 	newPeers := s.cluster.AllocIDs(2)
 	s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0])
```
```diff
@@ -254,6 +254,31 @@ func (s *testRegionCacheSuite) TestRequestFail(c *C) {
 	c.Assert(region.unreachableStores, HasLen, 0)
 }
 
+func (s *testRegionCacheSuite) TestRequestFail2(c *C) {
+	// key range: ['' - 'm' - 'z']
+	region2 := s.cluster.AllocID()
+	newPeers := s.cluster.AllocIDs(2)
+	s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0])
+
+	// Check the two regions.
+	loc1, err := s.cache.LocateKey(s.bo, []byte("a"))
+	c.Assert(err, IsNil)
+	c.Assert(loc1.Region.id, Equals, s.region1)
+	loc2, err := s.cache.LocateKey(s.bo, []byte("x"))
+	c.Assert(err, IsNil)
+	c.Assert(loc2.Region.id, Equals, region2)
+
+	// Request should fail on region1.
+	ctx, _ := s.cache.GetRPCContext(s.bo, loc1.Region)
+	c.Assert(s.cache.storeMu.stores, HasLen, 1)
+	s.checkCache(c, 2)
+	s.cache.OnRequestFail(ctx)
+	// Both region2 and store should be dropped from cache.
+	c.Assert(s.cache.storeMu.stores, HasLen, 0)
+	c.Assert(s.cache.getRegionFromCache([]byte("x")), IsNil)
+	s.checkCache(c, 1)
+}
+
 func (s *testRegionCacheSuite) TestUpdateStoreAddr(c *C) {
 	client := &RawKVClient{
 		clusterID: 0,
```

Review comment (on the `s.checkCache(c, 1)` line): If only one store is used, why is there still one region in the cache?

Reply: region1 is not removed; we have to try its other peers before reloading it from PD, because PD may not be updated in time.
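To make that reply concrete, here is a minimal sketch of the "try its other peers first" idea. It is not the actual store/tikv implementation: `cachedRegion`, `peer`, and `nextPeer` are hypothetical stand-ins for the real `Region`, its peer list, and the `unreachableStores` bookkeeping that the test above asserts on.

```go
package main

import "fmt"

// Hypothetical, simplified stand-ins for the real Region/Peer types in
// store/tikv; only the fields needed for this sketch are modelled.
type peer struct{ storeID uint64 }

type cachedRegion struct {
	peers             []peer
	unreachableStores []uint64 // store IDs that recently failed a request
}

// nextPeer returns the first peer whose store has not been marked
// unreachable. Only when every peer has failed (ok == false) would the
// caller drop the region from the cache and reload it from PD, since PD
// may not have caught up with the latest topology yet.
func (r *cachedRegion) nextPeer() (p peer, ok bool) {
	for _, cand := range r.peers {
		unreachable := false
		for _, s := range r.unreachableStores {
			if cand.storeID == s {
				unreachable = true
				break
			}
		}
		if !unreachable {
			return cand, true
		}
	}
	return peer{}, false
}

func main() {
	r := &cachedRegion{
		peers:             []peer{{storeID: 1}, {storeID: 2}, {storeID: 3}},
		unreachableStores: []uint64{1}, // store1 just failed a request
	}
	p, ok := r.nextPeer()
	fmt.Println(p.storeID, ok) // 2 true: try the next peer before asking PD
}
```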
Review comment: It's hard for me to understand this test case.

Reply: It first splits the cluster into two regions (region1 and region2) whose leaders are on the same store (store1). After a request fails on region1, we check that region2 and store1 are removed from the cache.
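For readers who still find the test flow hard to follow, the sketch below models the cache-state transition the reply describes. It is an illustration only, assuming a cache that maps region IDs to the store currently serving them plus a store address map; `onRequestFail` is a simplified stand-in for `RegionCache.OnRequestFail`, and the counts printed in `main` mirror the test's assertions (two regions and one store before the failure, one region and zero stores after).

```go
package main

import "fmt"

// Simplified, hypothetical model of the region cache: regions keyed by ID
// with the store that currently serves them, plus a store address map.
type regionCache struct {
	regions map[uint64]uint64 // region ID -> serving store ID
	stores  map[uint64]string // store ID -> address
}

// onRequestFail sketches what the PR title describes: when a request to a
// store fails, evict every *other* cached region served by that store and
// forget the store itself. The failed region stays cached so its remaining
// peers can be tried before it is reloaded from PD.
func (c *regionCache) onRequestFail(failedRegionID, storeID uint64) {
	for id, servingStore := range c.regions {
		if id != failedRegionID && servingStore == storeID {
			delete(c.regions, id)
		}
	}
	delete(c.stores, storeID)
}

func main() {
	c := &regionCache{
		regions: map[uint64]uint64{1: 1, 2: 1}, // region1 and region2, both on store1
		stores:  map[uint64]string{1: "127.0.0.1:20160"},
	}
	fmt.Println(len(c.regions), len(c.stores)) // 2 1, like s.checkCache(c, 2) and HasLen 1
	c.onRequestFail(1, 1)                      // a request to region1 on store1 fails
	fmt.Println(len(c.regions), len(c.stores)) // 1 0: region2 and store1 dropped, region1 kept
}
```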