From 4b0878ed38c46eca398ee4857520c8c06c92ec5f Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 16 Aug 2024 12:01:11 +0800 Subject: [PATCH 01/10] tests/api: add wait leader for api (#8540) close tikv/pd#8513 tests/api: add wait leader for api Signed-off-by: husharp --- tests/server/api/api_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index 31769d91ffc..8d48221784a 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -129,6 +129,13 @@ func (suite *middlewareTestSuite) SetupSuite() { suite.cluster = cluster } +func (suite *middlewareTestSuite) SetupTest() { + re := suite.Require() + re.NotEmpty(suite.cluster.WaitLeader()) + leader := suite.cluster.GetLeaderServer() + re.NotNil(leader) +} + func (suite *middlewareTestSuite) TearDownSuite() { re := suite.Require() re.NoError(failpoint.Disable("github.com/tikv/pd/server/api/enableFailpointAPI")) From 10cbdcf0c551c3e337ebc1b885b1f2ccd874b163 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Fri, 16 Aug 2024 14:50:41 +0800 Subject: [PATCH 02/10] config: disable EnableV2 of etcd (#8536) close tikv/pd#8535 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/api/member_test.go | 25 +++++++++++-------------- server/config/config.go | 1 - 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/server/api/member_test.go b/server/api/member_test.go index d3318081e62..ad4812249c7 100644 --- a/server/api/member_test.go +++ b/server/api/member_test.go @@ -15,9 +15,8 @@ package api import ( - "bytes" + "context" "encoding/json" - "fmt" "io" "math/rand" "net/http" @@ -31,6 +30,7 @@ import ( "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" + clientv3 "go.etcd.io/etcd/client/v3" ) type memberTestSuite struct { @@ -124,11 +124,10 @@ func (suite *memberTestSuite) TestChangeLeaderPeerUrls() { var got pdpb.Member re.NoError(json.Unmarshal(buf, &got)) - id := got.GetMemberId() peerUrls := got.GetPeerUrls() newPeerUrls := []string{"http://127.0.0.1:1111"} - suite.changeLeaderPeerUrls(leader, id, newPeerUrls) + suite.changeLeaderPeerUrls(leader, newPeerUrls) addr = suite.cfgs[rand.Intn(len(suite.cfgs))].ClientUrls + apiPrefix + "/api/v1/members" resp, err = testDialClient.Get(addr) re.NoError(err) @@ -141,21 +140,19 @@ func (suite *memberTestSuite) TestChangeLeaderPeerUrls() { re.Equal(newPeerUrls, got1["etcd_leader"].GetPeerUrls()) // reset - suite.changeLeaderPeerUrls(leader, id, peerUrls) + suite.changeLeaderPeerUrls(leader, peerUrls) } -func (suite *memberTestSuite) changeLeaderPeerUrls(leader *pdpb.Member, id uint64, urls []string) { +func (suite *memberTestSuite) changeLeaderPeerUrls(leader *pdpb.Member, urls []string) { re := suite.Require() - data := map[string][]string{"peerURLs": urls} - postData, err := json.Marshal(data) - re.NoError(err) - req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/v2/members/%s", leader.GetClientUrls()[0], fmt.Sprintf("%x", id)), bytes.NewBuffer(postData)) + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: leader.GetClientUrls(), + }) re.NoError(err) - req.Header.Set("Content-Type", "application/json") - resp, err := testDialClient.Do(req) + _, err = cli.MemberUpdate(context.Background(), leader.GetMemberId(), urls) re.NoError(err) - re.Equal(204, resp.StatusCode) - resp.Body.Close() + cli.Close() } func (suite *memberTestSuite) TestResignMyself() { diff --git a/server/config/config.go 
b/server/config/config.go index 8c4f6eaacc8..c1791be1fd9 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -746,7 +746,6 @@ func (c *Config) GenEmbedEtcdConfig() (*embed.Config, error) { cfg.ForceNewCluster = c.ForceNewCluster cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(c.Logger, c.Logger.Core(), c.LogProps.Syncer) cfg.EnableGRPCGateway = c.EnableGRPCGateway - cfg.EnableV2 = true cfg.Logger = "zap" var err error From a7663514c5ebfc78a99bacc84e030c9b7530a10b Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Mon, 19 Aug 2024 17:55:41 +0800 Subject: [PATCH 03/10] *: fix some typos (#8544) ref tikv/pd#4399 Signed-off-by: lhy1024 --- client/pd_service_discovery.go | 4 ++-- conf/config.toml | 2 +- pkg/autoscaling/calculation.go | 2 +- pkg/gc/safepoint_test.go | 16 ++++++++-------- pkg/member/election_leader.go | 2 +- pkg/movingaverage/weight_allocator.go | 2 +- pkg/replication/replication_mode_test.go | 2 +- pkg/schedule/checker/rule_checker.go | 4 ++-- pkg/schedule/operator/operator.go | 2 +- .../operator/operator_controller_test.go | 2 +- pkg/schedule/plan/plan.go | 18 +++++++++--------- pkg/schedule/scatter/region_scatterer.go | 2 +- pkg/schedule/schedulers/evict_slow_store.go | 2 +- pkg/schedule/schedulers/evict_slow_trend.go | 2 +- pkg/schedule/schedulers/metrics.go | 2 +- pkg/schedule/schedulers/utils.go | 2 +- scripts/dashboard-version | 2 +- scripts/update-dashboard.sh | 8 ++++---- server/api/middleware.go | 2 +- server/cluster/cluster_test.go | 4 ++-- tests/server/apiv2/handlers/keyspace_test.go | 4 ++-- tests/server/cluster/cluster_test.go | 2 +- tools/pd-api-bench/cases/controller.go | 12 ++++++------ tools/pd-ctl/pdctl/command/config_command.go | 18 +++++++++--------- tools/pd-simulator/README.md | 2 +- 25 files changed, 60 insertions(+), 60 deletions(-) diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index e8f4c0d7707..c34a5bebac6 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -157,7 +157,7 @@ type pdServiceClient struct { } // NOTE: In the current implementation, the URL passed in is bound to have a scheme, -// because it is processed in `newPDServiceDiscovery`, and the url returned by etcd member owns the sheme. +// because it is processed in `newPDServiceDiscovery`, and the url returned by etcd member owns the scheme. // When testing, the URL is also bound to have a scheme. func newPDServiceClient(url, leaderURL string, conn *grpc.ClientConn, isLeader bool) ServiceClient { cli := &pdServiceClient{ @@ -1074,7 +1074,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader leaderURL := pickMatchedURL(leader.GetClientUrls(), c.tlsCfg) leaderChanged, err := c.switchLeader(leaderURL) followerChanged := c.updateFollowers(members, leader.GetMemberId(), leaderURL) - // don't need to recreate balancer if no changess. + // don't need to recreate balancer if no changes. if !followerChanged && !leaderChanged { return err } diff --git a/conf/config.toml b/conf/config.toml index 0c4acf5fd8c..f2feacf30f7 100644 --- a/conf/config.toml +++ b/conf/config.toml @@ -206,7 +206,7 @@ [keyspace] ## pre-alloc is used to pre-allocate keyspaces during pd bootstrap. -## Its value should be a list of strings, denotting the name of the keyspaces. +## Its value should be a list of strings, denoting the name of the keyspaces. 
## Example: ## pre-alloc = ["admin", "user1", "user2"] # pre-alloc = [] diff --git a/pkg/autoscaling/calculation.go b/pkg/autoscaling/calculation.go index b155c44bf68..43aa2972ed8 100644 --- a/pkg/autoscaling/calculation.go +++ b/pkg/autoscaling/calculation.go @@ -431,7 +431,7 @@ func findBestGroupToScaleOut(strategy *Strategy, groups []*Plan, component Compo }, } - // TODO: we can provide different senerios by using options and remove this kind of special judgement. + // TODO: we can provide different scenarios by using options and remove this kind of special judgement. if component == TiKV { group.Labels[filter.SpecialUseKey] = filter.SpecialUseHotRegion } diff --git a/pkg/gc/safepoint_test.go b/pkg/gc/safepoint_test.go index 39cd3660b2b..bc1e551594c 100644 --- a/pkg/gc/safepoint_test.go +++ b/pkg/gc/safepoint_test.go @@ -85,7 +85,7 @@ func TestGCSafePointUpdateCurrently(t *testing.T) { func TestServiceGCSafePointUpdate(t *testing.T) { re := require.New(t) manager := NewSafePointManager(newGCStorage(), config.PDServerConfig{}) - gcworkerServiceID := "gc_worker" + gcWorkerServiceID := "gc_worker" cdcServiceID := "cdc" brServiceID := "br" cdcServiceSafePoint := uint64(10) @@ -101,7 +101,7 @@ func TestServiceGCSafePointUpdate(t *testing.T) { re.NoError(err) re.True(updated) // the service will init the service safepoint to 0(<10 for cdc) for gc_worker. - re.Equal(gcworkerServiceID, min.ServiceID) + re.Equal(gcWorkerServiceID, min.ServiceID) }() // update the safepoint for br to 15 should success @@ -111,24 +111,24 @@ func TestServiceGCSafePointUpdate(t *testing.T) { re.NoError(err) re.True(updated) // the service will init the service safepoint to 0(<10 for cdc) for gc_worker. - re.Equal(gcworkerServiceID, min.ServiceID) + re.Equal(gcWorkerServiceID, min.ServiceID) }() - // update safepoint to 8 for gc_woker should be success + // update safepoint to 8 for gc_worker should be success go func() { defer wg.Done() // update with valid ttl for gc_worker should be success. - min, updated, _ := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) + min, updated, _ := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) re.True(updated) // the current min safepoint should be 8 for gc_worker(cdc 10) re.Equal(gcWorkerSafePoint, min.SafePoint) - re.Equal(gcworkerServiceID, min.ServiceID) + re.Equal(gcWorkerServiceID, min.ServiceID) }() go func() { defer wg.Done() // update safepoint of gc_worker's service with ttl not infinity should be failed. 
- _, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, 10000, 10, time.Now()) + _, updated, err := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, 10000, 10, time.Now()) re.Error(err) re.False(updated) }() @@ -145,7 +145,7 @@ func TestServiceGCSafePointUpdate(t *testing.T) { wg.Wait() // update safepoint to 15(>10 for cdc) for gc_worker gcWorkerSafePoint = uint64(15) - min, updated, err := manager.UpdateServiceGCSafePoint(gcworkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) + min, updated, err := manager.UpdateServiceGCSafePoint(gcWorkerServiceID, gcWorkerSafePoint, math.MaxInt64, time.Now()) re.NoError(err) re.True(updated) re.Equal(cdcServiceID, min.ServiceID) diff --git a/pkg/member/election_leader.go b/pkg/member/election_leader.go index 24520bfbe64..81afc5dbd0a 100644 --- a/pkg/member/election_leader.go +++ b/pkg/member/election_leader.go @@ -21,7 +21,7 @@ import ( ) // ElectionLeader defines the common interface of the leader, which is the pdpb.Member -// for in PD/API service or the tsopb.Participant in the microserives. +// for in PD/API service or the tsopb.Participant in the micro services. type ElectionLeader interface { // GetListenUrls returns the listen urls GetListenUrls() []string diff --git a/pkg/movingaverage/weight_allocator.go b/pkg/movingaverage/weight_allocator.go index f63ce377e08..e0427d84645 100644 --- a/pkg/movingaverage/weight_allocator.go +++ b/pkg/movingaverage/weight_allocator.go @@ -20,7 +20,7 @@ package movingaverage // WeightAllocator will divide these items into some segments whose number named as segNum which should great than 0. // And the items at first segment will be assigned more weight that is `segNum` times that of item at last segment. // If you want assign same weights, just input segNum as 1. 
-// If length is 10 and segNum is 3, it will make the weight arrry as [3,3,3,3,2,2,2,1,1,1], +// If length is 10 and segNum is 3, it will make the weight array as [3,3,3,3,2,2,2,1,1,1], // and then uniform it : [3,3,3,3,2,2,2,1,1,1]/sum(arr)=arr/21, // And the final weight is [0.143,0.143,0.143,0.143,0.095,0.095,0.095,0.047,0.047,0.047]; // If length is 10 and segNum is 1, the weight is [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]; diff --git a/pkg/replication/replication_mode_test.go b/pkg/replication/replication_mode_test.go index bbaada98924..243d7f7d8f1 100644 --- a/pkg/replication/replication_mode_test.go +++ b/pkg/replication/replication_mode_test.go @@ -409,7 +409,7 @@ func TestReplicateState(t *testing.T) { rep.tickReplicateStatus() assertLastData(t, replicator.lastData[1], "sync", stateID, nil) - // repliate state to new member + // replicate state to new member replicator.memberIDs = append(replicator.memberIDs, 2, 3) rep.tickReplicateStatus() assertLastData(t, replicator.lastData[2], "sync", stateID, nil) diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index e29cd2bc05b..23018ad7096 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -265,7 +265,7 @@ func (c *RuleChecker) replaceUnexpectedRulePeer(region *core.RegionInfo, rf *pla minCount := uint64(math.MaxUint64) for _, p := range region.GetPeers() { count := c.record.getOfflineLeaderCount(p.GetStoreId()) - checkPeerhealth := func() bool { + checkPeerHealth := func() bool { if p.GetId() == peer.GetId() { return true } @@ -274,7 +274,7 @@ func (c *RuleChecker) replaceUnexpectedRulePeer(region *core.RegionInfo, rf *pla } return c.allowLeader(fit, p) } - if minCount > count && checkPeerhealth() { + if minCount > count && checkPeerHealth() { minCount = count newLeader = p } diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go index 4d57d4fc6c7..f89f6606412 100644 --- a/pkg/schedule/operator/operator.go +++ b/pkg/schedule/operator/operator.go @@ -45,7 +45,7 @@ var ( EpochNotMatch CancelReasonType = "epoch not match" // AlreadyExist is the cancel reason when the operator is running. AlreadyExist CancelReasonType = "already exist" - // AdminStop is the cancel reason when the operator is stopped by adminer. + // AdminStop is the cancel reason when the operator is stopped by admin. AdminStop CancelReasonType = "admin stop" // NotInRunningState is the cancel reason when the operator is not in running state. NotInRunningState CancelReasonType = "not in running state" diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index 3894df7e5e7..16ba899db1d 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -523,7 +523,7 @@ func (suite *operatorControllerTestSuite) TestCheckOperatorLightly() { re.Nil(r) re.Equal(reason, RegionNotFound) - // check failed because of verions of region epoch changed + // check failed because of versions of region epoch changed cluster.PutRegion(target) source.GetMeta().RegionEpoch = &metapb.RegionEpoch{ConfVer: 0, Version: 1} r, reason = controller.checkOperatorLightly(ops[0]) diff --git a/pkg/schedule/plan/plan.go b/pkg/schedule/plan/plan.go index 8a389b9b9e8..5eb8a345914 100644 --- a/pkg/schedule/plan/plan.go +++ b/pkg/schedule/plan/plan.go @@ -30,18 +30,18 @@ type Plan interface { SetStatus(*Status) } -// Summary is used to analyse plan simply. 
+// Summary is used to analyze plan simply. // It will return the status of store. type Summary func([]Plan) (map[uint64]Status, bool, error) -// Collector is a plan collector +// Collector is a plan collector. type Collector struct { basePlan Plan unschedulablePlans []Plan schedulablePlans []Plan } -// NewCollector returns a new Collector +// NewCollector returns a new Collector. func NewCollector(plan Plan) *Collector { return &Collector{ basePlan: plan, @@ -50,7 +50,7 @@ func NewCollector(plan Plan) *Collector { } } -// Collect is used to collect a new Plan and save it into PlanCollector +// Collect is used to collect a new Plan and save it into PlanCollector. func (c *Collector) Collect(opts ...Option) { if c == nil { return @@ -63,7 +63,7 @@ func (c *Collector) Collect(opts ...Option) { } } -// GetPlans returns all plans and the first part plans are schedulable +// GetPlans returns all plans and the first part plans are schedulable. func (c *Collector) GetPlans() []Plan { if c == nil { return nil @@ -71,24 +71,24 @@ func (c *Collector) GetPlans() []Plan { return append(c.schedulablePlans, c.unschedulablePlans...) } -// Option is to do some action for plan +// Option is to do some action for plan. type Option func(plan Plan) -// SetStatus is used to set status for plan +// SetStatus is used to set status for plan. func SetStatus(status *Status) Option { return func(plan Plan) { plan.SetStatus(status) } } -// SetResource is used to generate Resource for plan +// SetResource is used to generate Resource for plan. func SetResource(resource any) Option { return func(plan Plan) { plan.SetResource(resource) } } -// SetResourceWithStep is used to generate Resource for plan +// SetResourceWithStep is used to generate Resource for plan. func SetResourceWithStep(resource any, step int) Option { return func(plan Plan) { plan.SetResourceWithStep(resource, step) diff --git a/pkg/schedule/scatter/region_scatterer.go b/pkg/schedule/scatter/region_scatterer.go index efef5439fed..71b2cd346c7 100644 --- a/pkg/schedule/scatter/region_scatterer.go +++ b/pkg/schedule/scatter/region_scatterer.go @@ -437,7 +437,7 @@ func isSameDistribution(region *core.RegionInfo, targetPeers map[uint64]*metapb. // selectNewPeer return the new peer which pick the fewest picked count. // it keeps the origin peer if the origin store's pick count is equal the fewest pick. -// it can be diveded into three steps: +// it can be divided into three steps: // 1. found the max pick count and the min pick count. // 2. if max pick count equals min pick count, it means all store picked count are some, return the origin peer. // 3. otherwise, select the store which pick count is the min pick count and pass all filter. diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index de581f597bb..8f9c8841e04 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -94,7 +94,7 @@ func (conf *evictSlowStoreSchedulerConfig) evictStore() uint64 { return conf.getStores()[0] } -// readyForRecovery checks whether the last cpatured candidate is ready for recovery. +// readyForRecovery checks whether the last captured candidate is ready for recovery. 
func (conf *evictSlowStoreSchedulerConfig) readyForRecovery() bool { conf.RLock() defer conf.RUnlock() diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index 427787016a2..b63b4b4a2e0 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -147,7 +147,7 @@ func (conf *evictSlowTrendSchedulerConfig) lastCandidateCapturedSecs() uint64 { return DurationSinceAsSecs(conf.lastEvictCandidate.captureTS) } -// readyForRecovery checks whether the last cpatured candidate is ready for recovery. +// readyForRecovery checks whether the last captured candidate is ready for recovery. func (conf *evictSlowTrendSchedulerConfig) readyForRecovery() bool { conf.RLock() defer conf.RUnlock() diff --git a/pkg/schedule/schedulers/metrics.go b/pkg/schedule/schedulers/metrics.go index 42170e43818..90b0b0c6bb2 100644 --- a/pkg/schedule/schedulers/metrics.go +++ b/pkg/schedule/schedulers/metrics.go @@ -110,7 +110,7 @@ var ( Namespace: "pd", Subsystem: "scheduler", Name: "store_slow_trend_evicted_status", - Help: "Store evited by slow trend status for schedule", + Help: "Store evicted by slow trend status for schedule", }, []string{"address", "store"}) storeSlowTrendActionStatusGauge = prometheus.NewGaugeVec( diff --git a/pkg/schedule/schedulers/utils.go b/pkg/schedule/schedulers/utils.go index 1e911cf7b06..7cbfe714aa9 100644 --- a/pkg/schedule/schedulers/utils.go +++ b/pkg/schedule/schedulers/utils.go @@ -286,7 +286,7 @@ func sliceLoadCmp(cmps ...storeLoadCmp) storeLoadCmp { } // stLdRankCmp returns a cmp that compares the two loads with discretized data. -// For example, if the rank function discretice data by step 10 , the load 11 and 19 will be considered equal. +// For example, if the rank function discretize data by step 10 , the load 11 and 19 will be considered equal. 
func stLdRankCmp(dim func(ld *statistics.StoreLoad) float64, rank func(value float64) int64) storeLoadCmp { return func(ld1, ld2 *statistics.StoreLoad) int { return rankCmp(dim(ld1), dim(ld2), rank) diff --git a/scripts/dashboard-version b/scripts/dashboard-version index 42ff906db4e..d8d960683ae 100644 --- a/scripts/dashboard-version +++ b/scripts/dashboard-version @@ -1,3 +1,3 @@ # This file is updated by running scripts/update-dashboard.sh -# Don't edit it manullay +# Don't edit it manually 8.3.0-e6e78c7c diff --git a/scripts/update-dashboard.sh b/scripts/update-dashboard.sh index 7411960f766..6eff7d5f827 100755 --- a/scripts/update-dashboard.sh +++ b/scripts/update-dashboard.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" BASE_DIR="$(dirname "$CUR_DIR")" DASHBOARD_VERSION_FILE="$BASE_DIR/scripts/dashboard-version" # old version @@ -23,9 +23,9 @@ if [ "$#" -ge 1 ]; then # so that we don't need to modify the embed-dashboard-ui.sh logic TO_FILE_VERSION=${DASHBOARD_VERSION#v} - echo "# This file is updated by running scripts/update-dashboard.sh" > $DASHBOARD_VERSION_FILE - echo "# Don't edit it manullay" >> $DASHBOARD_VERSION_FILE - echo $TO_FILE_VERSION >> $DASHBOARD_VERSION_FILE + echo "# This file is updated by running scripts/update-dashboard.sh" >$DASHBOARD_VERSION_FILE + echo "# Don't edit it manually" >>$DASHBOARD_VERSION_FILE + echo $TO_FILE_VERSION >>$DASHBOARD_VERSION_FILE fi echo "+ Update dashboard version to $DASHBOARD_VERSION" diff --git a/server/api/middleware.go b/server/api/middleware.go index 010889f08ce..fd0d81412ea 100644 --- a/server/api/middleware.go +++ b/server/api/middleware.go @@ -46,7 +46,7 @@ func (s *serviceMiddlewareBuilder) createHandler(next func(http.ResponseWriter, return negroni.New(append(s.handlers, negroni.WrapFunc(next))...) } -// requestInfoMiddleware is used to gather info from requsetInfo +// requestInfoMiddleware is used to gather info from requestInfo type requestInfoMiddleware struct { svr *server.Server } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 3f01305b3f1..cffefbba444 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -952,8 +952,8 @@ func TestRegionHeartbeat(t *testing.T) { re.NoError(cluster.processRegionHeartbeat(ctx, overlapRegion)) tracer.OnAllStageFinished() re.Condition(func() bool { - fileds := tracer.LogFields() - return slice.AllOf(fileds, func(i int) bool { return fileds[i].Integer > 0 }) + fields := tracer.LogFields() + return slice.AllOf(fields, func(i int) bool { return fields[i].Integer > 0 }) }, "should have stats") region = &metapb.Region{} ok, err = storage.LoadRegion(regions[n-1].GetID(), region) diff --git a/tests/server/apiv2/handlers/keyspace_test.go b/tests/server/apiv2/handlers/keyspace_test.go index 18466ca0da7..f3aa55bbe43 100644 --- a/tests/server/apiv2/handlers/keyspace_test.go +++ b/tests/server/apiv2/handlers/keyspace_test.go @@ -109,7 +109,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { success, disabledAgain := sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "disabled"}) re.True(success) re.Equal(disabled, disabledAgain) - // Tombstoning a DISABLED keyspace should not be allowed. + // Tombstone a DISABLED keyspace should not be allowed. 
success, _ = sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "tombstone"}) re.False(success) // Archiving a DISABLED keyspace should be allowed. @@ -119,7 +119,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { // Enabling an ARCHIVED keyspace is not allowed. success, _ = sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "enabled"}) re.False(success) - // Tombstoning an ARCHIVED keyspace is allowed. + // Tombstone an ARCHIVED keyspace is allowed. success, tombstone := sendUpdateStateRequest(re, suite.server, created.Name, &handlers.UpdateStateParam{State: "tombstone"}) re.True(success) re.Equal(keyspacepb.KeyspaceState_TOMBSTONE, tombstone.State) diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index c95aa50cb3d..27d3300b293 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -827,7 +827,7 @@ func TestSetScheduleOpt(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // TODO: enable placementrules + // TODO: enable placementRules tc, err := tests.NewTestCluster(ctx, 1, func(cfg *config.Config, _ string) { cfg.Replication.EnablePlacementRules = false }) defer tc.Destroy() re.NoError(err) diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go index a4e20f25758..e19c79c0f3e 100644 --- a/tools/pd-api-bench/cases/controller.go +++ b/tools/pd-api-bench/cases/controller.go @@ -231,10 +231,10 @@ func (c *httpController) run() { case <-ticker.C: err := c.Do(c.ctx, hCli) if err != nil { - log.Error("meet erorr when doing HTTP request", zap.String("case", c.Name()), zap.Error(err)) + log.Error("meet error when doing HTTP request", zap.String("case", c.Name()), zap.Error(err)) } case <-c.ctx.Done(): - log.Info("Got signal to exit running HTTP case") + log.Info("got signal to exit running HTTP case") return } } @@ -300,10 +300,10 @@ func (c *gRPCController) run() { case <-ticker.C: err := c.Unary(c.ctx, cli) if err != nil { - log.Error("meet erorr when doing gRPC request", zap.String("case", c.Name()), zap.Error(err)) + log.Error("meet error when doing gRPC request", zap.String("case", c.Name()), zap.Error(err)) } case <-c.ctx.Done(): - log.Info("Got signal to exit running gRPC case") + log.Info("got signal to exit running gRPC case") return } } @@ -374,10 +374,10 @@ func (c *etcdController) run() { case <-ticker.C: err := c.Unary(c.ctx, cli) if err != nil { - log.Error("meet erorr when doing etcd request", zap.String("case", c.Name()), zap.Error(err)) + log.Error("meet error when doing etcd request", zap.String("case", c.Name()), zap.Error(err)) } case <-c.ctx.Done(): - log.Info("Got signal to exit running etcd case") + log.Info("got signal to exit running etcd case") return } } diff --git a/tools/pd-ctl/pdctl/command/config_command.go b/tools/pd-ctl/pdctl/command/config_command.go index 6f88e06a0dd..4efaae3fa13 100644 --- a/tools/pd-ctl/pdctl/command/config_command.go +++ b/tools/pd-ctl/pdctl/command/config_command.go @@ -77,7 +77,7 @@ func NewShowConfigCommand() *cobra.Command { sc.AddCommand(NewShowClusterVersionCommand()) sc.AddCommand(newShowReplicationModeCommand()) sc.AddCommand(NewShowServerConfigCommand()) - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") return sc } @@ -88,7 
+88,7 @@ func NewShowAllConfigCommand() *cobra.Command { Short: "show all config of PD", Run: showAllConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") return sc } @@ -99,7 +99,7 @@ func NewShowScheduleConfigCommand() *cobra.Command { Short: "show schedule config of PD", Run: showScheduleConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") return sc } @@ -110,7 +110,7 @@ func NewShowReplicationConfigCommand() *cobra.Command { Short: "show replication config of PD", Run: showReplicationConfigCommandFunc, } - sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + sc.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") return sc } @@ -447,7 +447,7 @@ func NewPlacementRulesCommand() *cobra.Command { show.Flags().String("id", "", "rule id") show.Flags().String("region", "", "region id") show.Flags().Bool("detail", false, "detailed match info for region") - show.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + show.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") load := &cobra.Command{ Use: "load", Short: "load placement rules to a file", @@ -457,7 +457,7 @@ func NewPlacementRulesCommand() *cobra.Command { load.Flags().String("id", "", "rule id") load.Flags().String("region", "", "region id") load.Flags().String("out", "rules.json", "the filename contains rules") - load.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + load.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") save := &cobra.Command{ Use: "save", Short: "save rules from file", @@ -473,7 +473,7 @@ func NewPlacementRulesCommand() *cobra.Command { Short: "show rule group configuration(s)", Run: showRuleGroupFunc, } - ruleGroupShow.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + ruleGroupShow.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") ruleGroupSet := &cobra.Command{ Use: "set ", Short: "update rule group configuration", @@ -496,7 +496,7 @@ func NewPlacementRulesCommand() *cobra.Command { Run: getRuleBundle, } ruleBundleGet.Flags().String("out", "", "the output file") - ruleBundleGet.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + ruleBundleGet.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") ruleBundleSet := &cobra.Command{ Use: "set", Short: "set rule group config and its rules from file", @@ -515,7 +515,7 @@ func NewPlacementRulesCommand() *cobra.Command { Run: loadRuleBundle, } ruleBundleLoad.Flags().String("out", "rules.json", "the output file") - ruleBundleLoad.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micor service") + ruleBundleLoad.Flags().Bool(flagFromAPIServer, false, "read data from api server rather than micro service") ruleBundleSave := &cobra.Command{ Use: "save", Short: "save all group configs and rules from file", diff --git a/tools/pd-simulator/README.md b/tools/pd-simulator/README.md index 
107f6c40f64..d0519bfedff 100644 --- a/tools/pd-simulator/README.md +++ b/tools/pd-simulator/README.md @@ -44,7 +44,7 @@ Run a specific case with an external PD: ./pd-simulator -pd="http://127.0.0.1:2379" -case="casename" ``` -Run with tiup playgroudn : +Run with tiup playground: ```shell tiup playground nightly --host 127.0.0.1 --kv.binpath ./pd-simulator --kv=1 --db=0 --kv.config=./tikv.conf ``` From 6bf980de258c8e13c032fa023504ba30929d7f48 Mon Sep 17 00:00:00 2001 From: Hu# Date: Tue, 20 Aug 2024 15:51:12 +0800 Subject: [PATCH 04/10] tests/tso: Create independent clients to prevent interfering with other tests. (#8546) ref tikv/pd#8103 tests/tso: Create independent clients to prevent interfering with other tests. Signed-off-by: husharp Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- tests/integrations/tso/client_test.go | 53 ++++++++++++++------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index f5f33240d18..a669e093200 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -103,17 +103,7 @@ func (suite *tsoClientTestSuite) SetupSuite() { suite.backendEndpoints = suite.pdLeaderServer.GetAddr() suite.keyspaceIDs = make([]uint32, 0) - if suite.legacy { - client, err := pd.NewClientWithContext(suite.ctx, suite.getBackendEndpoints(), pd.SecurityOption{}, pd.WithForwardingOption(true)) - re.NoError(err) - innerClient, ok := client.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) - re.True(ok) - re.Equal(constant.NullKeyspaceID, innerClient.GetServiceDiscovery().GetKeyspaceID()) - re.Equal(constant.DefaultKeyspaceGroupID, innerClient.GetServiceDiscovery().GetKeyspaceGroupID()) - mcs.WaitForTSOServiceAvailable(suite.ctx, re, client) - suite.clients = make([]pd.Client, 0) - suite.clients = append(suite.clients, client) - } else { + if !suite.legacy { suite.tsoCluster, err = tests.NewTestTSOCluster(suite.ctx, 3, suite.backendEndpoints) re.NoError(err) @@ -148,7 +138,23 @@ func (suite *tsoClientTestSuite) SetupSuite() { }, }) } + } +} +// Create independent clients to prevent interfering with other tests. 
+func (suite *tsoClientTestSuite) SetupTest() { + re := suite.Require() + if suite.legacy { + client, err := pd.NewClientWithContext(suite.ctx, suite.getBackendEndpoints(), pd.SecurityOption{}, pd.WithForwardingOption(true)) + re.NoError(err) + innerClient, ok := client.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) + re.True(ok) + re.Equal(constant.NullKeyspaceID, innerClient.GetServiceDiscovery().GetKeyspaceID()) + re.Equal(constant.DefaultKeyspaceGroupID, innerClient.GetServiceDiscovery().GetKeyspaceGroupID()) + mcs.WaitForTSOServiceAvailable(suite.ctx, re, client) + suite.clients = make([]pd.Client, 0) + suite.clients = append(suite.clients, client) + } else { suite.waitForAllKeyspaceGroupsInServing(re) } } @@ -183,15 +189,18 @@ func (suite *tsoClientTestSuite) waitForAllKeyspaceGroupsInServing(re *require.A re.Equal(len(suite.keyspaceIDs), len(suite.clients)) } +func (suite *tsoClientTestSuite) TearDownTest() { + for _, client := range suite.clients { + client.Close() + } +} + func (suite *tsoClientTestSuite) TearDownSuite() { suite.cancel() if !suite.legacy { suite.tsoCluster.Destroy() } suite.cluster.Destroy() - for _, client := range suite.clients { - client.Close() - } } func (suite *tsoClientTestSuite) TestGetTS() { @@ -432,21 +441,13 @@ func (suite *tsoClientTestSuite) TestGetTSWhileResettingTSOClient() { re := suite.Require() re.NoError(failpoint.Enable("github.com/tikv/pd/client/delayDispatchTSORequest", "return(true)")) var ( - clients []pd.Client stopSignal atomic.Bool wg sync.WaitGroup ) - // Create independent clients to prevent interfering with other tests. - if suite.legacy { - client, err := pd.NewClientWithContext(suite.ctx, suite.getBackendEndpoints(), pd.SecurityOption{}, pd.WithForwardingOption(true)) - re.NoError(err) - clients = []pd.Client{client} - } else { - clients = mcs.WaitForMultiKeyspacesTSOAvailable(suite.ctx, re, suite.keyspaceIDs, suite.getBackendEndpoints()) - } - wg.Add(tsoRequestConcurrencyNumber * len(clients)) + + wg.Add(tsoRequestConcurrencyNumber * len(suite.clients)) for i := 0; i < tsoRequestConcurrencyNumber; i++ { - for _, client := range clients { + for _, client := range suite.clients { go func(client pd.Client) { defer wg.Done() var lastTS uint64 @@ -465,7 +466,7 @@ func (suite *tsoClientTestSuite) TestGetTSWhileResettingTSOClient() { } // Reset the TSO clients while requesting TSO concurrently. 
for i := 0; i < tsoRequestConcurrencyNumber; i++ { - for _, client := range clients { + for _, client := range suite.clients { client.(interface{ ResetTSOClient() }).ResetTSOClient() } } From 2e8f20fef9ce497f9a2d9ed2be3174ecf16fd17e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=90=8D=E7=99=BD?= <251098199@qq.com> Date: Wed, 21 Aug 2024 11:21:11 +0800 Subject: [PATCH 05/10] server: refactor params of `askSplit` call (#8537) close tikv/pd#5014 Signed-off-by: qingfeng777 <251098199@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/grpc_service.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/server/grpc_service.go b/server/grpc_service.go index f7a0ea9443e..d3db45c8364 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -1756,10 +1756,7 @@ func (s *GrpcServer) AskSplit(ctx context.Context, request *pdpb.AskSplitRequest "missing region for split"), }, nil } - req := &pdpb.AskSplitRequest{ - Region: request.Region, - } - split, err := rc.HandleAskSplit(req) + split, err := rc.HandleAskSplit(request) if err != nil { return &pdpb.AskSplitResponse{ Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), @@ -1836,11 +1833,7 @@ func (s *GrpcServer) AskBatchSplit(ctx context.Context, request *pdpb.AskBatchSp "missing region for split"), }, nil } - req := &pdpb.AskBatchSplitRequest{ - Region: request.Region, - SplitCount: request.SplitCount, - } - split, err := rc.HandleAskBatchSplit(req) + split, err := rc.HandleAskBatchSplit(request) if err != nil { return &pdpb.AskBatchSplitResponse{ Header: s.wrapErrorToHeader(pdpb.ErrorType_UNKNOWN, err.Error()), From 9c14c63a06a6230eaf1a59fe0aa97342015da1f0 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Wed, 21 Aug 2024 12:07:42 +0800 Subject: [PATCH 06/10] mcs: fix potential data race in scheduling server (#8539) close tikv/pd#8538 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/mcs/scheduling/server/server.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index e1753cf2972..9ea369aae9e 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -55,6 +55,7 @@ import ( "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/apiutil" + "github.com/tikv/pd/pkg/utils/etcdutil" "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/memberutil" @@ -193,7 +194,7 @@ func (s *Server) updateAPIServerMemberLoop() { if !s.IsServing() { continue } - members, err := s.GetClient().MemberList(ctx) + members, err := etcdutil.ListEtcdMembers(ctx, s.GetClient()) if err != nil { log.Warn("failed to list members", errs.ZapError(err)) continue @@ -212,6 +213,11 @@ func (s *Server) updateAPIServerMemberLoop() { cc, err := s.GetDelegateClient(ctx, s.GetTLSConfig(), ep.ClientURLs[0]) if err != nil { log.Info("failed to get delegate client", errs.ZapError(err)) + continue + } + if !s.IsServing() { + // double check + break } if s.cluster.SwitchAPIServerLeader(pdpb.NewPDClient(cc)) { if status.Leader != curLeader { From ce1d0e800f34130e56677861d1570a47c7aa2a87 Mon Sep 17 00:00:00 2001 From: okJiang <819421878@qq.com> Date: Wed, 21 Aug 2024 13:24:11 +0800 Subject: [PATCH 07/10] scheduler: remove old scheduler name (#8516) ref tikv/pd#8379 Signed-off-by: okJiang 
<819421878@qq.com> Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/mcs/scheduling/server/cluster.go | 2 +- pkg/mcs/scheduling/server/config/config.go | 2 +- pkg/mock/mockconfig/mockconfig.go | 2 +- pkg/schedule/checker/merge_checker.go | 2 +- pkg/schedule/checker/replica_checker.go | 2 +- pkg/schedule/checker/rule_checker.go | 2 +- pkg/schedule/config/config.go | 2 +- pkg/schedule/config/config_provider.go | 2 +- pkg/schedule/coordinator.go | 4 +- pkg/schedule/filter/counter_test.go | 2 +- pkg/schedule/handler/handler.go | 4 +- pkg/schedule/operator/metrics.go | 2 +- pkg/schedule/schedulers/balance_leader.go | 4 +- pkg/schedule/schedulers/balance_region.go | 7 +-- pkg/schedule/schedulers/balance_test.go | 2 +- pkg/schedule/schedulers/balance_witness.go | 4 +- .../schedulers/balance_witness_test.go | 2 +- pkg/schedule/schedulers/base_scheduler.go | 2 +- .../schedulers/diagnostic_recorder.go | 36 ++++++++------ pkg/schedule/schedulers/evict_leader.go | 6 +-- pkg/schedule/schedulers/evict_leader_test.go | 2 +- pkg/schedule/schedulers/evict_slow_store.go | 5 +- .../schedulers/evict_slow_store_test.go | 2 +- pkg/schedule/schedulers/evict_slow_trend.go | 7 +-- .../schedulers/evict_slow_trend_test.go | 2 +- pkg/schedule/schedulers/grant_hot_region.go | 7 +-- pkg/schedule/schedulers/grant_leader.go | 9 +--- pkg/schedule/schedulers/hot_region.go | 6 +-- pkg/schedule/schedulers/hot_region_test.go | 8 ++-- pkg/schedule/schedulers/init.go | 2 +- pkg/schedule/schedulers/label.go | 9 +--- pkg/schedule/schedulers/metrics.go | 2 +- pkg/schedule/schedulers/random_merge.go | 7 +-- pkg/schedule/schedulers/scatter_range.go | 7 +-- pkg/schedule/schedulers/scheduler.go | 2 +- .../schedulers/scheduler_controller.go | 5 +- pkg/schedule/schedulers/scheduler_test.go | 2 +- pkg/schedule/schedulers/shuffle_hot_region.go | 7 +-- pkg/schedule/schedulers/shuffle_leader.go | 7 +-- pkg/schedule/schedulers/shuffle_region.go | 13 ++--- pkg/schedule/schedulers/split_bucket.go | 4 +- .../schedulers/transfer_witness_leader.go | 4 +- .../transfer_witness_leader_test.go | 2 +- pkg/schedule/{type => types}/type.go | 0 plugin/scheduler_example/evict_leader.go | 2 +- server/api/diagnostic_test.go | 13 ++--- server/api/scheduler.go | 11 ++--- server/cluster/cluster_test.go | 48 +++++++++---------- server/cluster/scheduling_controller.go | 3 +- server/config/persist_options.go | 2 +- server/handler.go | 2 +- .../mcs/scheduling/config_test.go | 15 +++--- .../mcs/scheduling/server_test.go | 35 +++++++------- .../realcluster/scheduler_test.go | 26 +++++----- tests/server/api/scheduler_test.go | 2 +- tests/server/cluster/cluster_test.go | 11 +++-- 56 files changed, 164 insertions(+), 218 deletions(-) rename pkg/schedule/{type => types}/type.go (100%) diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index a5a3a709184..31120ae6082 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -27,7 +27,7 @@ import ( "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 4b855d09899..4d7cee91f4a 100644 --- a/pkg/mcs/scheduling/server/config/config.go 
+++ b/pkg/mcs/scheduling/server/config/config.go @@ -36,7 +36,7 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" mcsconstant "github.com/tikv/pd/pkg/mcs/utils/constant" sc "github.com/tikv/pd/pkg/schedule/config" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/configutil" diff --git a/pkg/mock/mockconfig/mockconfig.go b/pkg/mock/mockconfig/mockconfig.go index ccd1f98154d..6e595f982c1 100644 --- a/pkg/mock/mockconfig/mockconfig.go +++ b/pkg/mock/mockconfig/mockconfig.go @@ -16,7 +16,7 @@ package mockconfig import ( sc "github.com/tikv/pd/pkg/schedule/config" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/server/config" ) diff --git a/pkg/schedule/checker/merge_checker.go b/pkg/schedule/checker/merge_checker.go index 65189d35c1d..d5a39da83ae 100644 --- a/pkg/schedule/checker/merge_checker.go +++ b/pkg/schedule/checker/merge_checker.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/logutil" ) diff --git a/pkg/schedule/checker/replica_checker.go b/pkg/schedule/checker/replica_checker.go index b0c42e88258..a21e19b3d66 100644 --- a/pkg/schedule/checker/replica_checker.go +++ b/pkg/schedule/checker/replica_checker.go @@ -26,7 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "go.uber.org/zap" ) diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go index 23018ad7096..82807441bf8 100644 --- a/pkg/schedule/checker/rule_checker.go +++ b/pkg/schedule/checker/rule_checker.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/versioninfo" "go.uber.org/zap" ) diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 124bff0a704..344569d6460 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core/storelimit" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/configutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" diff --git a/pkg/schedule/config/config_provider.go b/pkg/schedule/config/config_provider.go index d7bc38a7c03..95bcad5add0 100644 --- a/pkg/schedule/config/config_provider.go +++ b/pkg/schedule/config/config_provider.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage/endpoint" ) diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index 89c99ac90b8..2a31045129e 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go 
@@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/logutil" @@ -182,7 +182,7 @@ func (c *Coordinator) driveSlowNodeScheduler() { case <-ticker.C: { // If enabled, exit. - if exists, _ := c.schedulers.IsSchedulerExisted(schedulers.EvictSlowTrendName); exists { + if exists, _ := c.schedulers.IsSchedulerExisted(types.EvictSlowTrendScheduler.String()); exists { return } // If the cluster was set up with `raft-kv2` engine, this cluster should diff --git a/pkg/schedule/filter/counter_test.go b/pkg/schedule/filter/counter_test.go index 5b8d5144412..7c7acc5e9a5 100644 --- a/pkg/schedule/filter/counter_test.go +++ b/pkg/schedule/filter/counter_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/require" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" ) func TestString(t *testing.T) { diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go index 748a17b87ef..a8540b4b5f4 100644 --- a/pkg/schedule/handler/handler.go +++ b/pkg/schedule/handler/handler.go @@ -38,6 +38,7 @@ import ( "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/statistics/utils" @@ -884,7 +885,8 @@ func (h *Handler) GetSchedulerByStatus(status string, needTS bool) (any, error) // GetDiagnosticResult returns the diagnostic results of the specified scheduler. func (h *Handler) GetDiagnosticResult(name string) (*schedulers.DiagnosticResult, error) { - if _, ok := schedulers.DiagnosableSummaryFunc[name]; !ok { + tp := types.StringToSchedulerType[name] + if _, ok := schedulers.DiagnosableSummaryFunc[tp]; !ok { return nil, errs.ErrSchedulerUndiagnosable.FastGenByArgs(name) } co := h.GetCoordinator() diff --git a/pkg/schedule/operator/metrics.go b/pkg/schedule/operator/metrics.go index 74f9ddad0c7..47a165500e9 100644 --- a/pkg/schedule/operator/metrics.go +++ b/pkg/schedule/operator/metrics.go @@ -16,7 +16,7 @@ package operator import ( "github.com/prometheus/client_golang/prometheus" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" ) var ( diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index ef254ee6950..60dbee79dc4 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" @@ -40,8 +40,6 @@ import ( ) const ( - // BalanceLeaderName is balance leader scheduler name. - BalanceLeaderName = "balance-leader-scheduler" // BalanceLeaderBatchSize is the default number of operators to transfer leaders by one scheduling. 
// Default value is 4 which is subjected by scheduler-max-waiting-operator and leader-schedule-limit // If you want to increase balance speed more, please increase above-mentioned param. diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 174b6af1c83..1ebe65d732c 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -26,15 +26,10 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "go.uber.org/zap" ) -const ( - // BalanceRegionName is balance region scheduler name. - BalanceRegionName = "balance-region-scheduler" -) - type balanceRegionSchedulerConfig struct { Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. diff --git a/pkg/schedule/schedulers/balance_test.go b/pkg/schedule/schedulers/balance_test.go index 2f163742bbb..e9ae771d18b 100644 --- a/pkg/schedule/schedulers/balance_test.go +++ b/pkg/schedule/schedulers/balance_test.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" "github.com/tikv/pd/pkg/versioninfo" diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 450c43647cf..47f23d470cc 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -32,7 +32,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" @@ -40,8 +40,6 @@ import ( ) const ( - // BalanceWitnessName is balance witness scheduler name. - BalanceWitnessName = "balance-witness-scheduler" // balanceWitnessBatchSize is the default number of operators to transfer witnesses by one scheduling. // Default value is 4 which is subjected by scheduler-max-waiting-operator and witness-schedule-limit // If you want to increase balance speed more, please increase above-mentioned param. 
diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index d8715f71784..03fcac77ccc 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -23,7 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" ) diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index b3dae9856e6..6e160effea7 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -23,7 +23,7 @@ import ( "github.com/tikv/pd/pkg/errs" sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/typeutil" ) diff --git a/pkg/schedule/schedulers/diagnostic_recorder.go b/pkg/schedule/schedulers/diagnostic_recorder.go index df57dbebe71..cd99262ee48 100644 --- a/pkg/schedule/schedulers/diagnostic_recorder.go +++ b/pkg/schedule/schedulers/diagnostic_recorder.go @@ -23,6 +23,7 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/types" ) const ( @@ -46,27 +47,27 @@ const ( // DiagnosableSummaryFunc includes all implementations of plan.Summary. // And it also includes all schedulers which pd support to diagnose. -var DiagnosableSummaryFunc = map[string]plan.Summary{ - BalanceRegionName: plan.BalancePlanSummary, - BalanceLeaderName: plan.BalancePlanSummary, +var DiagnosableSummaryFunc = map[types.CheckerSchedulerType]plan.Summary{ + types.BalanceRegionScheduler: plan.BalancePlanSummary, + types.BalanceLeaderScheduler: plan.BalancePlanSummary, } // DiagnosticRecorder is used to manage diagnostic for one scheduler. type DiagnosticRecorder struct { - schedulerName string + schedulerType types.CheckerSchedulerType config sc.SchedulerConfigProvider summaryFunc plan.Summary results *cache.FIFO } // NewDiagnosticRecorder creates a new DiagnosticRecorder. 
-func NewDiagnosticRecorder(name string, config sc.SchedulerConfigProvider) *DiagnosticRecorder { - summaryFunc, ok := DiagnosableSummaryFunc[name] +func NewDiagnosticRecorder(tp types.CheckerSchedulerType, config sc.SchedulerConfigProvider) *DiagnosticRecorder { + summaryFunc, ok := DiagnosableSummaryFunc[tp] if !ok { return nil } return &DiagnosticRecorder{ - schedulerName: name, + schedulerType: tp, config: config, summaryFunc: summaryFunc, results: cache.NewFIFO(maxDiagnosticResultNum), @@ -131,11 +132,11 @@ func (d *DiagnosticRecorder) GetLastResult() *DiagnosticResult { } } else if firstStatus == Pending { // This is used to handle pending status because of reach limit in `IsScheduleAllowed` - resStr = fmt.Sprintf("%s reach limit", d.schedulerName) + resStr = fmt.Sprintf("%v reach limit", d.schedulerType) } } return &DiagnosticResult{ - Name: d.schedulerName, + Name: d.schedulerType.String(), Status: firstStatus, Summary: resStr, Timestamp: uint64(time.Now().Unix()), @@ -147,7 +148,11 @@ func (d *DiagnosticRecorder) SetResultFromStatus(status string) { if d == nil { return } - result := &DiagnosticResult{Name: d.schedulerName, Timestamp: uint64(time.Now().Unix()), Status: status} + result := &DiagnosticResult{ + Name: d.schedulerType.String(), + Timestamp: uint64(time.Now().Unix()), + Status: status, + } d.results.Put(result.Timestamp, result) } @@ -161,11 +166,14 @@ func (d *DiagnosticRecorder) SetResultFromPlans(ops []*operator.Operator, plans } func (d *DiagnosticRecorder) analyze(ops []*operator.Operator, plans []plan.Plan, ts uint64) *DiagnosticResult { - res := &DiagnosticResult{Name: d.schedulerName, Timestamp: ts, Status: Normal} - name := d.schedulerName + res := &DiagnosticResult{ + Name: d.schedulerType.String(), + Timestamp: ts, + Status: Normal, + } // TODO: support more schedulers and checkers - switch name { - case BalanceRegionName, BalanceLeaderName: + switch d.schedulerType { + case types.BalanceRegionScheduler, types.BalanceLeaderScheduler: if len(ops) != 0 { res.Status = Scheduling return res diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index 85f861f0082..a7d656a3e42 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -28,7 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" @@ -36,8 +36,6 @@ import ( ) const ( - // EvictLeaderName is evict leader scheduler name. 
- EvictLeaderName = "evict-leader-scheduler" // EvictLeaderBatchSize is the number of operators to transfer // leaders by one scheduling EvictLeaderBatchSize = 3 @@ -212,7 +210,7 @@ func (conf *evictLeaderSchedulerConfig) delete(id uint64) (any, error) { return resp, nil } conf.Unlock() - if err := conf.removeSchedulerCb(EvictLeaderName); err != nil { + if err := conf.removeSchedulerCb(types.EvictLeaderScheduler.String()); err != nil { if !errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) { conf.resetStore(id, keyRanges) } diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go index 692dda63437..b2c84ec68b7 100644 --- a/pkg/schedule/schedulers/evict_leader_test.go +++ b/pkg/schedule/schedulers/evict_leader_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index 8f9c8841e04..d23fc2f8ff8 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -26,7 +26,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" @@ -34,9 +34,6 @@ import ( ) const ( - // EvictSlowStoreName is evict leader scheduler name. - EvictSlowStoreName = "evict-slow-store-scheduler" - slowStoreEvictThreshold = 100 slowStoreRecoverThreshold = 1 ) diff --git a/pkg/schedule/schedulers/evict_slow_store_test.go b/pkg/schedule/schedulers/evict_slow_store_test.go index 406a08b9c99..79651fb5b5c 100644 --- a/pkg/schedule/schedulers/evict_slow_store_test.go +++ b/pkg/schedule/schedulers/evict_slow_store_test.go @@ -23,7 +23,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index b63b4b4a2e0..8fd76bdccd4 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -27,18 +27,13 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" "go.uber.org/zap" ) -const ( - // EvictSlowTrendName is evict leader by slow trend scheduler name. 
- EvictSlowTrendName = "evict-slow-trend-scheduler" -) - const ( alterEpsilon = 1e-9 minReCheckDurationGap = 120 // default gap for re-check the slow node, unit: s diff --git a/pkg/schedule/schedulers/evict_slow_trend_test.go b/pkg/schedule/schedulers/evict_slow_trend_test.go index 02cb65021eb..4be6eeb58d9 100644 --- a/pkg/schedule/schedulers/evict_slow_trend_test.go +++ b/pkg/schedule/schedulers/evict_slow_trend_test.go @@ -25,7 +25,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index a441f41062a..18402c14437 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -30,7 +30,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" @@ -40,11 +40,6 @@ import ( "go.uber.org/zap" ) -const ( - // GrantHotRegionName is grant hot region scheduler name. - GrantHotRegionName = "grant-hot-region-scheduler" -) - type grantHotRegionSchedulerConfig struct { syncutil.RWMutex schedulerConfig diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index d70c0b19d67..5dbb6eef5f6 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -28,18 +28,13 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" "go.uber.org/zap" ) -const ( - // GrantLeaderName is grant leader scheduler name. - GrantLeaderName = "grant-leader-scheduler" -) - type grantLeaderSchedulerConfig struct { syncutil.RWMutex schedulerConfig @@ -312,7 +307,7 @@ func (handler *grantLeaderHandler) deleteConfig(w http.ResponseWriter, r *http.R return } if last { - if err := handler.config.removeSchedulerCb(GrantLeaderName); err != nil { + if err := handler.config.removeSchedulerCb(types.GrantLeaderScheduler.String()); err != nil { if errors.ErrorEqual(err, errs.ErrSchedulerNotFound.FastGenByArgs()) { handler.rd.JSON(w, http.StatusNotFound, err.Error()) } else { diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index ab595ec9058..6506698b75c 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" @@ -45,8 +45,6 @@ import ( ) const ( - // HotRegionName is balance hot region scheduler name. 
- HotRegionName = "balance-hot-region-scheduler" splitHotReadBuckets = "split-hot-read-region" splitHotWriteBuckets = "split-hot-write-region" splitProgressiveRank = 5 @@ -191,7 +189,6 @@ func (h *baseHotScheduler) randomType() resourceType { } type hotScheduler struct { - name string *baseHotScheduler syncutil.RWMutex // config of hot scheduler @@ -203,7 +200,6 @@ func newHotScheduler(opController *operator.Controller, conf *hotRegionScheduler base := newBaseHotScheduler(opController, conf.getHistorySampleDuration(), conf.getHistorySampleInterval()) ret := &hotScheduler{ - name: HotRegionName, baseHotScheduler: base, conf: conf, } diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 7ee0883fb7d..a4b3225312d 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -29,7 +29,7 @@ import ( "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/statistics/utils" @@ -61,14 +61,12 @@ func init() { func newHotReadScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { ret := newHotScheduler(opController, conf) - ret.name = "" ret.types = []resourceType{readLeader, readPeer} return ret } func newHotWriteScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { ret := newHotScheduler(opController, conf) - ret.name = "" ret.types = []resourceType{writeLeader, writePeer} return ret } @@ -2465,7 +2463,7 @@ func TestCompatibilityConfig(t *testing.T) { "dst-tolerance-ratio": 1.05, }) re.NoError(err) - err = storage.SaveSchedulerConfig(HotRegionName, data) + err = storage.SaveSchedulerConfig(types.BalanceHotRegionScheduler.String(), data) re.NoError(err) hb, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) @@ -2481,7 +2479,7 @@ func TestCompatibilityConfig(t *testing.T) { cfg.WriteLeaderPriorities = []string{"query", "key"} data, err = EncodeConfig(cfg) re.NoError(err) - err = storage.SaveSchedulerConfig(HotRegionName, data) + err = storage.SaveSchedulerConfig(types.BalanceHotRegionScheduler.String(), data) re.NoError(err) hb, err = CreateScheduler(types.BalanceHotRegionScheduler, oc, storage, ConfigJSONDecoder(data)) re.NoError(err) diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index 16f78284cf8..e3101d6788b 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -22,7 +22,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage/endpoint" ) diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index 8d4f42262ac..d1a06a5c4ff 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -24,15 +24,10 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "go.uber.org/zap" ) -const ( - // LabelName is label scheduler name. 
- LabelName = "label-scheduler" -) - type labelSchedulerConfig struct { Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. @@ -95,7 +90,7 @@ func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*ope f := filter.NewExcludedFilter(s.GetName(), nil, excludeStores) target := filter.NewCandidates(cluster.GetFollowerStores(region)). - FilterTarget(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: LabelName, TransferLeader: true, OperatorLevel: constant.Medium}, f). + FilterTarget(cluster.GetSchedulerConfig(), nil, nil, &filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true, OperatorLevel: constant.Medium}, f). RandomPick() if target == nil { log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) diff --git a/pkg/schedule/schedulers/metrics.go b/pkg/schedule/schedulers/metrics.go index 90b0b0c6bb2..a518a167af7 100644 --- a/pkg/schedule/schedulers/metrics.go +++ b/pkg/schedule/schedulers/metrics.go @@ -16,7 +16,7 @@ package schedulers import ( "github.com/prometheus/client_golang/prometheus" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" ) var ( diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index 676e5407e72..f6660472f57 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -26,12 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" -) - -const ( - // RandomMergeName is random merge scheduler name. 
- RandomMergeName = "random-merge-scheduler" + "github.com/tikv/pd/pkg/schedule/types" ) type randomMergeSchedulerConfig struct { diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index e86785fcc19..37f00d2df6e 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -25,17 +25,12 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" ) -const ( - // ScatterRangeName is scatter range scheduler name - ScatterRangeName = "scatter-range" -) - type scatterRangeSchedulerConfig struct { syncutil.RWMutex schedulerConfig diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index 7fce5d9c46e..27be70680af 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -27,7 +27,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage/endpoint" "go.uber.org/zap" ) diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index c29b75a371a..5e1082acee3 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -29,6 +29,7 @@ import ( "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -400,7 +401,7 @@ func (c *Controller) GetPausedSchedulerDelayUntil(name string) (int64, error) { func (c *Controller) CheckTransferWitnessLeader(region *core.RegionInfo) { if core.NeedTransferWitnessLeader(region) { c.RLock() - s, ok := c.schedulers[TransferWitnessLeaderName] + s, ok := c.schedulers[types.TransferWitnessLeaderScheduler.String()] c.RUnlock() if ok { select { @@ -440,7 +441,7 @@ func NewScheduleController(ctx context.Context, cluster sche.SchedulerCluster, o nextInterval: s.GetMinInterval(), ctx: ctx, cancel: cancel, - diagnosticRecorder: NewDiagnosticRecorder(s.GetName(), cluster.GetSchedulerConfig()), + diagnosticRecorder: NewDiagnosticRecorder(s.GetType(), cluster.GetSchedulerConfig()), } } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 8dfe9f3616f..ba734230ea5 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -28,7 +28,7 @@ import ( "github.com/tikv/pd/pkg/schedule/hbstream" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index f8544fff48d..b8818dc48da 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ 
-26,7 +26,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -34,11 +34,6 @@ import ( "go.uber.org/zap" ) -const ( - // ShuffleHotRegionName is shuffle hot region scheduler name. - ShuffleHotRegionName = "shuffle-hot-region-scheduler" -) - type shuffleHotRegionSchedulerConfig struct { syncutil.RWMutex schedulerConfig diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 4270613667b..842a26d9b12 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -23,12 +23,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" -) - -const ( - // ShuffleLeaderName is shuffle leader scheduler name. - ShuffleLeaderName = "shuffle-leader-scheduler" + "github.com/tikv/pd/pkg/schedule/types" ) type shuffleLeaderSchedulerConfig struct { diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index 5d4c49e0fcc..a4c247d3363 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -24,12 +24,7 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" -) - -const ( - // ShuffleRegionName is shuffle region scheduler name. - ShuffleRegionName = "shuffle-region-scheduler" + "github.com/tikv/pd/pkg/schedule/types" ) type shuffleRegionScheduler struct { @@ -41,11 +36,11 @@ type shuffleRegionScheduler struct { // newShuffleRegionScheduler creates an admin scheduler that shuffles regions // between stores. func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleRegionSchedulerConfig) Scheduler { + base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) filters := []filter.Filter{ - &filter.StoreStateFilter{ActionScope: ShuffleRegionName, MoveRegion: true, OperatorLevel: constant.Low}, - filter.NewSpecialUseFilter(ShuffleRegionName), + &filter.StoreStateFilter{ActionScope: base.GetName(), MoveRegion: true, OperatorLevel: constant.Low}, + filter.NewSpecialUseFilter(base.GetName()), } - base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) return &shuffleRegionScheduler{ BaseScheduler: base, conf: conf, diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 0d8fa614aef..a0881ae1a34 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -28,7 +28,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/utils/reflectutil" "github.com/tikv/pd/pkg/utils/syncutil" @@ -36,8 +36,6 @@ import ( ) const ( - // SplitBucketName is the split bucket name. - SplitBucketName = "split-bucket-scheduler" // defaultHotDegree is the default hot region threshold. 
defaultHotDegree = 3 defaultSplitLimit = 10 diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 9e7bd9bccc1..2ef0fc6a4f2 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -24,12 +24,10 @@ import ( "github.com/tikv/pd/pkg/schedule/filter" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" ) const ( - // TransferWitnessLeaderName is transfer witness leader scheduler name. - TransferWitnessLeaderName = "transfer-witness-leader-scheduler" // TransferWitnessLeaderBatchSize is the number of operators to to transfer // leaders by one scheduling transferWitnessLeaderBatchSize = 3 diff --git a/pkg/schedule/schedulers/transfer_witness_leader_test.go b/pkg/schedule/schedulers/transfer_witness_leader_test.go index 046b7aeb53f..b100e0a9535 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader_test.go +++ b/pkg/schedule/schedulers/transfer_witness_leader_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/operator" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" ) diff --git a/pkg/schedule/type/type.go b/pkg/schedule/types/type.go similarity index 100% rename from pkg/schedule/type/type.go rename to pkg/schedule/types/type.go diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index c7842debdcb..c20cfd41814 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -30,7 +30,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/schedulers" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" diff --git a/server/api/diagnostic_test.go b/server/api/diagnostic_test.go index 8c4089a8710..c85d9a45369 100644 --- a/server/api/diagnostic_test.go +++ b/server/api/diagnostic_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" @@ -95,17 +96,17 @@ func (suite *diagnosticTestSuite) TestSchedulerDiagnosticAPI() { re.NoError(tu.ReadGetJSON(re, testDialClient, addr, cfg)) re.True(cfg.Schedule.EnableDiagnostic) - balanceRegionURL := suite.urlPrefix + "/" + schedulers.BalanceRegionName + balanceRegionURL := suite.urlPrefix + "/" + types.BalanceRegionScheduler.String() result := &schedulers.DiagnosticResult{} err = tu.ReadGetJSON(re, testDialClient, balanceRegionURL, result) re.NoError(err) re.Equal("disabled", result.Status) - evictLeaderURL := suite.urlPrefix + "/" + schedulers.EvictLeaderName + evictLeaderURL := suite.urlPrefix + "/" + types.EvictLeaderScheduler.String() re.NoError(tu.CheckGetJSON(testDialClient, evictLeaderURL, nil, tu.StatusNotOK(re))) input := make(map[string]any) - input["name"] = schedulers.BalanceRegionName + input["name"] = types.BalanceRegionScheduler.String() body, err := 
json.Marshal(input) re.NoError(err) err = tu.CheckPostJSON(testDialClient, suite.schedulerPrefix, body, tu.StatusOK(re)) @@ -116,14 +117,14 @@ func (suite *diagnosticTestSuite) TestSchedulerDiagnosticAPI() { input["delay"] = 30 pauseArgs, err := json.Marshal(input) re.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.schedulerPrefix+"/"+schedulers.BalanceRegionName, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, suite.schedulerPrefix+"/"+types.BalanceRegionScheduler.String(), pauseArgs, tu.StatusOK(re)) re.NoError(err) suite.checkStatus("paused", balanceRegionURL) input["delay"] = 0 pauseArgs, err = json.Marshal(input) re.NoError(err) - err = tu.CheckPostJSON(testDialClient, suite.schedulerPrefix+"/"+schedulers.BalanceRegionName, pauseArgs, tu.StatusOK(re)) + err = tu.CheckPostJSON(testDialClient, suite.schedulerPrefix+"/"+types.BalanceRegionScheduler.String(), pauseArgs, tu.StatusOK(re)) re.NoError(err) suite.checkStatus("pending", balanceRegionURL) @@ -132,7 +133,7 @@ func (suite *diagnosticTestSuite) TestSchedulerDiagnosticAPI() { fmt.Println("after put region") suite.checkStatus("normal", balanceRegionURL) - deleteURL := fmt.Sprintf("%s/%s", suite.schedulerPrefix, schedulers.BalanceRegionName) + deleteURL := fmt.Sprintf("%s/%s", suite.schedulerPrefix, types.BalanceRegionScheduler.String()) err = tu.CheckDelete(testDialClient, deleteURL, tu.StatusOK(re)) re.NoError(err) suite.checkStatus("disabled", balanceRegionURL) diff --git a/server/api/scheduler.go b/server/api/scheduler.go index 1d502013558..c96e4c123de 100644 --- a/server/api/scheduler.go +++ b/server/api/scheduler.go @@ -24,8 +24,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/pkg/schedule/schedulers" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/server" "github.com/unrolled/render" @@ -177,11 +176,11 @@ func (h *schedulerHandler) CreateScheduler(w http.ResponseWriter, r *http.Reques func (h *schedulerHandler) DeleteScheduler(w http.ResponseWriter, r *http.Request) { name := mux.Vars(r)["name"] switch { - case strings.HasPrefix(name, schedulers.EvictLeaderName) && name != schedulers.EvictLeaderName: - h.redirectSchedulerDelete(w, name, schedulers.EvictLeaderName) + case strings.HasPrefix(name, types.EvictLeaderScheduler.String()) && name != types.EvictLeaderScheduler.String(): + h.redirectSchedulerDelete(w, name, types.EvictLeaderScheduler.String()) return - case strings.HasPrefix(name, schedulers.GrantLeaderName) && name != schedulers.GrantLeaderName: - h.redirectSchedulerDelete(w, name, schedulers.GrantLeaderName) + case strings.HasPrefix(name, types.GrantLeaderScheduler.String()) && name != types.GrantLeaderScheduler.String(): + h.redirectSchedulerDelete(w, name, types.GrantLeaderScheduler.String()) return default: if err := h.RemoveScheduler(name); err != nil { diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index cffefbba444..9a9420988a1 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -53,7 +53,7 @@ import ( "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/schedule/schedulers" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" @@ -366,7 +366,7 @@ func 
TestSetOfflineStoreWithEvictLeader(t *testing.T) { err = cluster.RemoveStore(3, false) re.Error(err) re.Contains(err.Error(), string(errs.ErrNoStoreForRegionLeader.RFCCode())) - re.NoError(cluster.RemoveScheduler(schedulers.EvictLeaderName)) + re.NoError(cluster.RemoveScheduler(types.EvictLeaderScheduler.String())) re.NoError(cluster.RemoveStore(3, false)) } @@ -2453,10 +2453,10 @@ func TestDispatch(t *testing.T) { waitOperator(re, co, 1) controller := co.GetSchedulersController() operatorutil.CheckTransferPeer(re, co.GetOperatorController().GetOperator(1), operator.OpKind(0), 4, 1) - re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) + re.NoError(controller.RemoveScheduler(types.BalanceRegionScheduler.String())) waitOperator(re, co, 2) operatorutil.CheckTransferLeader(re, co.GetOperatorController().GetOperator(2), operator.OpKind(0), 4, 2) - re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) + re.NoError(controller.RemoveScheduler(types.BalanceLeaderScheduler.String())) stream := mockhbstream.NewHeartbeatStream() @@ -3075,10 +3075,10 @@ func TestAddScheduler(t *testing.T) { defer cleanup() controller := co.GetSchedulersController() re.Len(controller.GetSchedulerNames(), len(sc.DefaultSchedulers)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) + re.NoError(controller.RemoveScheduler(types.BalanceLeaderScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceHotRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.EvictSlowStoreScheduler.String())) re.Empty(controller.GetSchedulerNames()) stream := mockhbstream.NewHeartbeatStream() @@ -3170,10 +3170,10 @@ func TestPersistScheduler(t *testing.T) { re.Len(sches, defaultCount+2) // remove all default schedulers - re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) + re.NoError(controller.RemoveScheduler(types.BalanceLeaderScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceHotRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.EvictSlowStoreScheduler.String())) // only remains 2 items with independent config. 
re.Len(controller.GetSchedulerNames(), 2) re.NoError(co.GetCluster().GetSchedulerConfig().Persist(storage)) @@ -3233,7 +3233,7 @@ func TestPersistScheduler(t *testing.T) { // the scheduler option should contain 9 items // the `hot scheduler` are disabled re.Len(co.GetCluster().GetSchedulerConfig().(*config.PersistOptions).GetSchedulers(), defaultCount+3) - re.NoError(controller.RemoveScheduler(schedulers.GrantLeaderName)) + re.NoError(controller.RemoveScheduler(types.GrantLeaderScheduler.String())) // the scheduler that is not enable by default will be completely deleted re.Len(co.GetCluster().GetSchedulerConfig().(*config.PersistOptions).GetSchedulers(), defaultCount+2) re.Len(controller.GetSchedulerNames(), 4) @@ -3250,7 +3250,7 @@ func TestPersistScheduler(t *testing.T) { co.Run() controller = co.GetSchedulersController() re.Len(controller.GetSchedulerNames(), 4) - re.NoError(controller.RemoveScheduler(schedulers.EvictLeaderName)) + re.NoError(controller.RemoveScheduler(types.EvictLeaderScheduler.String())) re.Len(controller.GetSchedulerNames(), 3) } @@ -3283,11 +3283,11 @@ func TestRemoveScheduler(t *testing.T) { re.Len(sches, defaultCount+1) // remove all schedulers - re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.GrantLeaderName)) - re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) + re.NoError(controller.RemoveScheduler(types.BalanceLeaderScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.BalanceHotRegionScheduler.String())) + re.NoError(controller.RemoveScheduler(types.GrantLeaderScheduler.String())) + re.NoError(controller.RemoveScheduler(types.EvictSlowStoreScheduler.String())) // all removed sches, _, err = storage.LoadAllSchedulerConfigs() re.NoError(err) @@ -3360,15 +3360,15 @@ func TestPauseScheduler(t *testing.T) { controller := co.GetSchedulersController() _, err := controller.IsSchedulerAllowed("test") re.Error(err) - controller.PauseOrResumeScheduler(schedulers.BalanceLeaderName, 60) - paused, _ := controller.IsSchedulerPaused(schedulers.BalanceLeaderName) + controller.PauseOrResumeScheduler(types.BalanceLeaderScheduler.String(), 60) + paused, _ := controller.IsSchedulerPaused(types.BalanceLeaderScheduler.String()) re.True(paused) - pausedAt, err := controller.GetPausedSchedulerDelayAt(schedulers.BalanceLeaderName) + pausedAt, err := controller.GetPausedSchedulerDelayAt(types.BalanceLeaderScheduler.String()) re.NoError(err) - resumeAt, err := controller.GetPausedSchedulerDelayUntil(schedulers.BalanceLeaderName) + resumeAt, err := controller.GetPausedSchedulerDelayUntil(types.BalanceLeaderScheduler.String()) re.NoError(err) re.Equal(int64(60), resumeAt-pausedAt) - allowed, _ := controller.IsSchedulerAllowed(schedulers.BalanceLeaderName) + allowed, _ := controller.IsSchedulerAllowed(types.BalanceLeaderScheduler.String()) re.False(allowed) } diff --git a/server/cluster/scheduling_controller.go b/server/cluster/scheduling_controller.go index b4c29ceed46..20a36a6817d 100644 --- a/server/cluster/scheduling_controller.go +++ b/server/cluster/scheduling_controller.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/schedule/scatter" "github.com/tikv/pd/pkg/schedule/schedulers" "github.com/tikv/pd/pkg/schedule/splitter" + 
"github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/buckets" "github.com/tikv/pd/pkg/statistics/utils" @@ -455,7 +456,7 @@ func (sc *schedulingController) getEvictLeaderStores() (evictStores []uint64) { if sc.coordinator == nil { return nil } - handler, ok := sc.coordinator.GetSchedulersController().GetSchedulerHandlers()[schedulers.EvictLeaderName] + handler, ok := sc.coordinator.GetSchedulersController().GetSchedulerHandlers()[types.EvictLeaderScheduler.String()] if !ok { return } diff --git a/server/config/persist_options.go b/server/config/persist_options.go index 19f0ef0d475..807e9699a25 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -33,7 +33,7 @@ import ( "github.com/tikv/pd/pkg/core/constant" "github.com/tikv/pd/pkg/core/storelimit" sc "github.com/tikv/pd/pkg/schedule/config" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/etcdutil" diff --git a/server/handler.go b/server/handler.go index e3e4184f177..1464c768aed 100644 --- a/server/handler.go +++ b/server/handler.go @@ -34,7 +34,7 @@ import ( sche "github.com/tikv/pd/pkg/schedule/core" "github.com/tikv/pd/pkg/schedule/handler" "github.com/tikv/pd/pkg/schedule/schedulers" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go index 168d3a8742c..fcc9a78b0f3 100644 --- a/tests/integrations/mcs/scheduling/config_test.go +++ b/tests/integrations/mcs/scheduling/config_test.go @@ -28,6 +28,7 @@ import ( "github.com/tikv/pd/pkg/mcs/scheduling/server/config" sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" @@ -165,14 +166,14 @@ func (suite *configTestSuite) TestSchedulerConfigWatch() { }) re.Equal(namesFromAPIServer, namesFromSchedulingServer) // Add a new scheduler. - api.MustAddScheduler(re, suite.pdLeaderServer.GetAddr(), schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.pdLeaderServer.GetAddr(), types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) // Check the new scheduler's config. testutil.Eventually(re, func() bool { namesFromSchedulingServer, _, err = storage.LoadAllSchedulerConfigs() re.NoError(err) - return slice.Contains(namesFromSchedulingServer, schedulers.EvictLeaderName) + return slice.Contains(namesFromSchedulingServer, types.EvictLeaderScheduler.String()) }) assertEvictLeaderStoreIDs(re, storage, []uint64{1}) // Update the scheduler by adding a store. @@ -187,20 +188,20 @@ func (suite *configTestSuite) TestSchedulerConfigWatch() { }, ) re.NoError(err) - api.MustAddScheduler(re, suite.pdLeaderServer.GetAddr(), schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.pdLeaderServer.GetAddr(), types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 2, }) assertEvictLeaderStoreIDs(re, storage, []uint64{1, 2}) // Update the scheduler by removing a store. 
- api.MustDeleteScheduler(re, suite.pdLeaderServer.GetAddr(), fmt.Sprintf("%s-%d", schedulers.EvictLeaderName, 1)) + api.MustDeleteScheduler(re, suite.pdLeaderServer.GetAddr(), fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) assertEvictLeaderStoreIDs(re, storage, []uint64{2}) // Delete the scheduler. - api.MustDeleteScheduler(re, suite.pdLeaderServer.GetAddr(), schedulers.EvictLeaderName) + api.MustDeleteScheduler(re, suite.pdLeaderServer.GetAddr(), types.EvictLeaderScheduler.String()) // Check the removed scheduler's config. testutil.Eventually(re, func() bool { namesFromSchedulingServer, _, err = storage.LoadAllSchedulerConfigs() re.NoError(err) - return !slice.Contains(namesFromSchedulingServer, schedulers.EvictLeaderName) + return !slice.Contains(namesFromSchedulingServer, types.EvictLeaderScheduler.String()) }) watcher.Close() } @@ -212,7 +213,7 @@ func assertEvictLeaderStoreIDs( StoreIDWithRanges map[uint64][]core.KeyRange `json:"store-id-ranges"` } testutil.Eventually(re, func() bool { - cfg, err := storage.LoadSchedulerConfig(schedulers.EvictLeaderName) + cfg, err := storage.LoadSchedulerConfig(types.EvictLeaderScheduler.String()) re.NoError(err) err = schedulers.DecodeConfig([]byte(cfg), &evictLeaderCfg) re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 5c08892a972..57306640394 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -32,6 +32,7 @@ import ( "github.com/tikv/pd/pkg/mcs/utils/constant" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/tests" @@ -305,7 +306,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { schedulersController := tc.GetPrimaryServer().GetCluster().GetCoordinator().GetSchedulersController() checkEvictLeaderSchedulerExist(re, schedulersController, false) // Add a new evict-leader-scheduler through the API server. - api.MustAddScheduler(re, suite.backendEndpoints, schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) // Check if the evict-leader-scheduler is added. @@ -323,54 +324,54 @@ func (suite *serverTestSuite) TestSchedulerSync() { }, ) re.NoError(err) - api.MustAddScheduler(re, suite.backendEndpoints, schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 2, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) // Delete a store_id from the evict-leader-scheduler through the API server. - api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", schedulers.EvictLeaderName, 1)) + api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{2}) // Add a store_id to the evict-leader-scheduler through the API server by the scheduler handler. 
- api.MustCallSchedulerConfigAPI(re, http.MethodPost, suite.backendEndpoints, schedulers.EvictLeaderName, []string{"config"}, map[string]any{ - "name": schedulers.EvictLeaderName, + api.MustCallSchedulerConfigAPI(re, http.MethodPost, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"config"}, map[string]any{ + "name": types.EvictLeaderScheduler.String(), "store_id": 1, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) // Delete a store_id from the evict-leader-scheduler through the API server by the scheduler handler. - api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, schedulers.EvictLeaderName, []string{"delete", "2"}, nil) + api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"delete", "2"}, nil) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) // If the last store is deleted, the scheduler should be removed. - api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, schedulers.EvictLeaderName, []string{"delete", "1"}, nil) + api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"delete", "1"}, nil) // Check if the scheduler is removed. checkEvictLeaderSchedulerExist(re, schedulersController, false) // Delete the evict-leader-scheduler through the API server by removing the last store_id. - api.MustAddScheduler(re, suite.backendEndpoints, schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) - api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", schedulers.EvictLeaderName, 1)) + api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, false) // Delete the evict-leader-scheduler through the API server. - api.MustAddScheduler(re, suite.backendEndpoints, schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) - api.MustDeleteScheduler(re, suite.backendEndpoints, schedulers.EvictLeaderName) + api.MustDeleteScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String()) checkEvictLeaderSchedulerExist(re, schedulersController, false) // The default scheduler could not be deleted, it could only be disabled. 
defaultSchedulerNames := []string{ - schedulers.BalanceLeaderName, - schedulers.BalanceRegionName, - schedulers.HotRegionName, + types.BalanceLeaderScheduler.String(), + types.BalanceRegionScheduler.String(), + types.BalanceHotRegionScheduler.String(), } checkDisabled := func(name string, shouldDisabled bool) { re.NotNil(schedulersController.GetScheduler(name), name) @@ -395,14 +396,14 @@ func (suite *serverTestSuite) TestSchedulerSync() { func checkEvictLeaderSchedulerExist(re *require.Assertions, sc *schedulers.Controller, exist bool) { testutil.Eventually(re, func() bool { if !exist { - return sc.GetScheduler(schedulers.EvictLeaderName) == nil + return sc.GetScheduler(types.EvictLeaderScheduler.String()) == nil } - return sc.GetScheduler(schedulers.EvictLeaderName) != nil + return sc.GetScheduler(types.EvictLeaderScheduler.String()) != nil }) } func checkEvictLeaderStoreIDs(re *require.Assertions, sc *schedulers.Controller, expected []uint64) { - handler, ok := sc.GetSchedulerHandlers()[schedulers.EvictLeaderName] + handler, ok := sc.GetSchedulerHandlers()[types.EvictLeaderScheduler.String()] re.True(ok) h, ok := handler.(interface { EvictStoreIDs() []uint64 diff --git a/tests/integrations/realcluster/scheduler_test.go b/tests/integrations/realcluster/scheduler_test.go index 3b75e6c8c88..98a18158114 100644 --- a/tests/integrations/realcluster/scheduler_test.go +++ b/tests/integrations/realcluster/scheduler_test.go @@ -25,7 +25,7 @@ import ( pd "github.com/tikv/pd/client/http" "github.com/tikv/pd/client/testutil" "github.com/tikv/pd/pkg/schedule/labeler" - "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" ) // https://github.com/tikv/pd/issues/6988#issuecomment-1694924611 @@ -47,9 +47,9 @@ func TestTransferLeader(t *testing.T) { } // record scheduler - re.NoError(pdHTTPCli.CreateScheduler(ctx, schedulers.EvictLeaderName, 1)) + re.NoError(pdHTTPCli.CreateScheduler(ctx, types.EvictLeaderScheduler.String(), 1)) defer func() { - re.NoError(pdHTTPCli.DeleteScheduler(ctx, schedulers.EvictLeaderName)) + re.NoError(pdHTTPCli.DeleteScheduler(ctx, types.EvictLeaderScheduler.String())) }() res, err := pdHTTPCli.GetSchedulers(ctx) re.NoError(err) @@ -89,16 +89,16 @@ func TestRegionLabelDenyScheduler(t *testing.T) { re.NotEmpty(regions.Regions) region1 := regions.Regions[0] - err = pdHTTPCli.DeleteScheduler(ctx, schedulers.BalanceLeaderName) + err = pdHTTPCli.DeleteScheduler(ctx, types.BalanceLeaderScheduler.String()) if err == nil { defer func() { - pdHTTPCli.CreateScheduler(ctx, schedulers.BalanceLeaderName, 0) + pdHTTPCli.CreateScheduler(ctx, types.BalanceLeaderScheduler.String(), 0) }() } - re.NoError(pdHTTPCli.CreateScheduler(ctx, schedulers.GrantLeaderName, uint64(region1.Leader.StoreID))) + re.NoError(pdHTTPCli.CreateScheduler(ctx, types.GrantLeaderScheduler.String(), uint64(region1.Leader.StoreID))) defer func() { - pdHTTPCli.DeleteScheduler(ctx, schedulers.GrantLeaderName) + pdHTTPCli.DeleteScheduler(ctx, types.GrantLeaderScheduler.String()) }() // wait leader transfer @@ -135,10 +135,10 @@ func TestRegionLabelDenyScheduler(t *testing.T) { re.Equal(labelRule.RuleType, labelRules[1].RuleType) // enable evict leader scheduler, and check it works - re.NoError(pdHTTPCli.DeleteScheduler(ctx, schedulers.GrantLeaderName)) - re.NoError(pdHTTPCli.CreateScheduler(ctx, schedulers.EvictLeaderName, uint64(region1.Leader.StoreID))) + re.NoError(pdHTTPCli.DeleteScheduler(ctx, types.GrantLeaderScheduler.String())) + re.NoError(pdHTTPCli.CreateScheduler(ctx, 
types.EvictLeaderScheduler.String(), uint64(region1.Leader.StoreID))) defer func() { - pdHTTPCli.DeleteScheduler(ctx, schedulers.EvictLeaderName) + pdHTTPCli.DeleteScheduler(ctx, types.EvictLeaderScheduler.String()) }() testutil.Eventually(re, func() bool { regions, err := pdHTTPCli.GetRegions(ctx) @@ -151,10 +151,10 @@ func TestRegionLabelDenyScheduler(t *testing.T) { return true }, testutil.WithWaitFor(time.Minute)) - re.NoError(pdHTTPCli.DeleteScheduler(ctx, schedulers.EvictLeaderName)) - re.NoError(pdHTTPCli.CreateScheduler(ctx, schedulers.GrantLeaderName, uint64(region1.Leader.StoreID))) + re.NoError(pdHTTPCli.DeleteScheduler(ctx, types.EvictLeaderScheduler.String())) + re.NoError(pdHTTPCli.CreateScheduler(ctx, types.GrantLeaderScheduler.String(), uint64(region1.Leader.StoreID))) defer func() { - pdHTTPCli.DeleteScheduler(ctx, schedulers.GrantLeaderName) + pdHTTPCli.DeleteScheduler(ctx, types.GrantLeaderScheduler.String()) }() testutil.Eventually(re, func() bool { regions, err := pdHTTPCli.GetRegions(ctx) diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index f3b8509d2e5..330a69eca63 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -29,7 +29,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" sc "github.com/tikv/pd/pkg/schedule/config" - types "github.com/tikv/pd/pkg/schedule/type" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/utils/apiutil" tu "github.com/tikv/pd/pkg/utils/testutil" diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index 27d3300b293..375edf027db 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -40,6 +40,7 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/operator" "github.com/tikv/pd/pkg/schedule/schedulers" + "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/syncer" @@ -1410,10 +1411,10 @@ func TestTransferLeaderForScheduler(t *testing.T) { re.True(leaderServer.GetRaftCluster().IsPrepared()) schedsNum := len(rc.GetCoordinator().GetSchedulersController().GetSchedulerNames()) // Add evict leader scheduler - api.MustAddScheduler(re, leaderServer.GetAddr(), schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, leaderServer.GetAddr(), types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) - api.MustAddScheduler(re, leaderServer.GetAddr(), schedulers.EvictLeaderName, map[string]any{ + api.MustAddScheduler(re, leaderServer.GetAddr(), types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 2, }) // Check scheduler updated. 
@@ -1468,14 +1469,14 @@ func TestTransferLeaderForScheduler(t *testing.T) { func checkEvictLeaderSchedulerExist(re *require.Assertions, sc *schedulers.Controller, exist bool) { testutil.Eventually(re, func() bool { if !exist { - return sc.GetScheduler(schedulers.EvictLeaderName) == nil + return sc.GetScheduler(types.EvictLeaderScheduler.String()) == nil } - return sc.GetScheduler(schedulers.EvictLeaderName) != nil + return sc.GetScheduler(types.EvictLeaderScheduler.String()) != nil }) } func checkEvictLeaderStoreIDs(re *require.Assertions, sc *schedulers.Controller, expected []uint64) { - handler, ok := sc.GetSchedulerHandlers()[schedulers.EvictLeaderName] + handler, ok := sc.GetSchedulerHandlers()[types.EvictLeaderScheduler.String()] re.True(ok) h, ok := handler.(interface { EvictStoreIDs() []uint64 From 6c30bbf6eda61747d96f8ae090fba5d1e298c4a6 Mon Sep 17 00:00:00 2001 From: Jack Lyu <63168620+JackL9u@users.noreply.github.com> Date: Wed, 21 Aug 2024 14:31:41 +0800 Subject: [PATCH 08/10] ctl: replace gc_safepoint call with PD HTTP SDK (#8504) ref tikv/pd#7300 replace gc_safepoint call with PD HTTP SDK Signed-off-by: Boyang Lyu Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- client/http/api.go | 6 + client/http/interface.go | 30 ++++ client/http/request_info.go | 2 + client/http/types.go | 16 ++ pkg/storage/endpoint/gc_safe_point.go | 1 + server/api/service_gc_safepoint.go | 1 + tests/integrations/client/http_client_test.go | 82 +++++++++++ .../pdctl/command/gc_safepoint_command.go | 34 +---- .../pd-ctl/tests/safepoint/safepoint_test.go | 138 ++++++++++++++++++ 9 files changed, 284 insertions(+), 26 deletions(-) create mode 100644 tools/pd-ctl/tests/safepoint/safepoint_test.go diff --git a/client/http/api.go b/client/http/api.go index 3376a48770d..d1bce99f4f9 100644 --- a/client/http/api.go +++ b/client/http/api.go @@ -79,6 +79,7 @@ const ( Status = "/pd/api/v1/status" Version = "/pd/api/v1/version" operators = "/pd/api/v1/operators" + safepoint = "/pd/api/v1/gc/safepoint" // Micro Service microServicePrefix = "/pd/api/v2/ms" // Keyspace @@ -215,3 +216,8 @@ func GetUpdateKeyspaceConfigURL(keyspaceName string) string { func GetKeyspaceMetaByNameURL(keyspaceName string) string { return fmt.Sprintf(GetKeyspaceMetaByName, keyspaceName) } + +// GetDeleteSafePointURI returns the URI for delete safepoint service +func GetDeleteSafePointURI(serviceID string) string { + return fmt.Sprintf("%s/%s", safepoint, serviceID) +} diff --git a/client/http/interface.go b/client/http/interface.go index cd9fc22702e..f5cd1a38211 100644 --- a/client/http/interface.go +++ b/client/http/interface.go @@ -100,6 +100,8 @@ type Client interface { /* Other interfaces */ GetMinResolvedTSByStoresIDs(context.Context, []uint64) (uint64, map[uint64]uint64, error) GetPDVersion(context.Context) (string, error) + GetGCSafePoint(context.Context) (ListServiceGCSafepoint, error) + DeleteGCSafePoint(context.Context, string) (string, error) /* Micro Service interfaces */ GetMicroServiceMembers(context.Context, string) ([]MicroServiceMember, error) GetMicroServicePrimary(context.Context, string) (string, error) @@ -1024,3 +1026,31 @@ func (c *client) GetKeyspaceMetaByName(ctx context.Context, keyspaceName string) } return &keyspaceMetaPB, nil } + +// GetGCSafePoint gets the GC safe point list. +func (c *client) GetGCSafePoint(ctx context.Context) (ListServiceGCSafepoint, error) { + var gcSafePoint ListServiceGCSafepoint + err := c.request(ctx, newRequestInfo(). + WithName(GetGCSafePointName). 
+ WithURI(safepoint). + WithMethod(http.MethodGet). + WithResp(&gcSafePoint)) + if err != nil { + return gcSafePoint, err + } + return gcSafePoint, nil +} + +// DeleteGCSafePoint deletes a GC safe point with the given service ID. +func (c *client) DeleteGCSafePoint(ctx context.Context, serviceID string) (string, error) { + var msg string + err := c.request(ctx, newRequestInfo(). + WithName(DeleteGCSafePointName). + WithURI(GetDeleteSafePointURI(serviceID)). + WithMethod(http.MethodDelete). + WithResp(&msg)) + if err != nil { + return msg, err + } + return msg, nil +} diff --git a/client/http/request_info.go b/client/http/request_info.go index 783220bcc60..94f71c6186e 100644 --- a/client/http/request_info.go +++ b/client/http/request_info.go @@ -85,6 +85,8 @@ const ( deleteOperators = "DeleteOperators" UpdateKeyspaceGCManagementTypeName = "UpdateKeyspaceGCManagementType" GetKeyspaceMetaByNameName = "GetKeyspaceMetaByName" + GetGCSafePointName = "GetGCSafePoint" + DeleteGCSafePointName = "DeleteGCSafePoint" ) type requestInfo struct { diff --git a/client/http/types.go b/client/http/types.go index 55f9b65caad..4bc60978a0e 100644 --- a/client/http/types.go +++ b/client/http/types.go @@ -25,6 +25,22 @@ import ( pd "github.com/tikv/pd/client" ) +// ServiceSafePoint is the safepoint for a specific service +// NOTE: This type is in sync with pd/pkg/storage/endpoint/gc_safe_point.go +type ServiceSafePoint struct { + ServiceID string `json:"service_id"` + ExpiredAt int64 `json:"expired_at"` + SafePoint uint64 `json:"safe_point"` +} + +// ListServiceGCSafepoint is the response for list service GC safepoint. +// NOTE: This type is in sync with pd/server/api/service_gc_safepoint.go +type ListServiceGCSafepoint struct { + ServiceGCSafepoints []*ServiceSafePoint `json:"service_gc_safe_points"` + MinServiceGcSafepoint uint64 `json:"min_service_gc_safe_point,omitempty"` + GCSafePoint uint64 `json:"gc_safe_point"` +} + // ClusterState saves some cluster state information. // NOTE: This type sync with https://github.com/tikv/pd/blob/5eae459c01a797cbd0c416054c6f0cad16b8740a/server/cluster/cluster.go#L173 type ClusterState struct { diff --git a/pkg/storage/endpoint/gc_safe_point.go b/pkg/storage/endpoint/gc_safe_point.go index 8d59d827fa4..7b0b0bf86a7 100644 --- a/pkg/storage/endpoint/gc_safe_point.go +++ b/pkg/storage/endpoint/gc_safe_point.go @@ -28,6 +28,7 @@ import ( // ServiceSafePoint is the safepoint for a specific service // NOTE: This type is exported by HTTP API. Please pay more attention when modifying it. +// This type is in sync with `client/http/types.go`. type ServiceSafePoint struct { ServiceID string `json:"service_id"` ExpiredAt int64 `json:"expired_at"` diff --git a/server/api/service_gc_safepoint.go b/server/api/service_gc_safepoint.go index d6bb153eb6f..ca29f9c352f 100644 --- a/server/api/service_gc_safepoint.go +++ b/server/api/service_gc_safepoint.go @@ -38,6 +38,7 @@ func newServiceGCSafepointHandler(svr *server.Server, rd *render.Render) *servic // ListServiceGCSafepoint is the response for list service GC safepoint. // NOTE: This type is exported by HTTP API. Please pay more attention when modifying it. +// This type is in sync with `pd/client/http/types.go`. 
type ListServiceGCSafepoint struct { ServiceGCSafepoints []*endpoint.ServiceSafePoint `json:"service_gc_safe_points"` MinServiceGcSafepoint uint64 `json:"min_service_gc_safe_point,omitempty"` diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index fd8b65f01ba..fe0962012e6 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -37,9 +37,11 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/placement" + "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/tsoutil" "github.com/tikv/pd/pkg/versioninfo" + "github.com/tikv/pd/server/api" "github.com/tikv/pd/tests" ) @@ -835,3 +837,83 @@ func (suite *httpClientTestSuite) TestRetryOnLeaderChange() { cancel() wg.Wait() } + +func (suite *httpClientTestSuite) TestGetGCSafePoint() { + re := suite.Require() + client := suite.client + ctx, cancel := context.WithCancel(suite.ctx) + defer cancel() + + // adding some safepoints to the server + list := &api.ListServiceGCSafepoint{ + ServiceGCSafepoints: []*endpoint.ServiceSafePoint{ + { + ServiceID: "AAA", + ExpiredAt: time.Now().Unix() + 10, + SafePoint: 1, + }, + { + ServiceID: "BBB", + ExpiredAt: time.Now().Unix() + 10, + SafePoint: 2, + }, + { + ServiceID: "CCC", + ExpiredAt: time.Now().Unix() + 10, + SafePoint: 3, + }, + }, + GCSafePoint: 1, + MinServiceGcSafepoint: 1, + } + + storage := suite.cluster.GetLeaderServer().GetServer().GetStorage() + for _, ssp := range list.ServiceGCSafepoints { + err := storage.SaveServiceGCSafePoint(ssp) + re.NoError(err) + } + storage.SaveGCSafePoint(1) + + // get the safepoints and start testing + l, err := client.GetGCSafePoint(ctx) + re.NoError(err) + + re.Equal(uint64(1), l.GCSafePoint) + re.Equal(uint64(1), l.MinServiceGcSafepoint) + re.Len(l.ServiceGCSafepoints, 3) + + // sort the gc safepoints based on order of ServiceID + sort.Slice(l.ServiceGCSafepoints, func(i, j int) bool { + return l.ServiceGCSafepoints[i].ServiceID < l.ServiceGCSafepoints[j].ServiceID + }) + + for i, val := range l.ServiceGCSafepoints { + re.Equal(list.ServiceGCSafepoints[i].ServiceID, val.ServiceID) + re.Equal(list.ServiceGCSafepoints[i].SafePoint, val.SafePoint) + } + + // delete the safepoints + for i := 0; i < 3; i++ { + msg, err := client.DeleteGCSafePoint(ctx, list.ServiceGCSafepoints[i].ServiceID) + re.NoError(err) + re.Equal("Delete service GC safepoint successfully.", msg) + } + + // check that the safepoitns are indeed deleted + l, err = client.GetGCSafePoint(ctx) + re.NoError(err) + + re.Equal(uint64(1), l.GCSafePoint) + re.Equal(uint64(0), l.MinServiceGcSafepoint) + re.Empty(l.ServiceGCSafepoints) + + // try delete gc_worker, should get an error + _, err = client.DeleteGCSafePoint(ctx, "gc_worker") + re.Error(err) + + // try delete some non-exist safepoints, should return normally + var msg string + msg, err = client.DeleteGCSafePoint(ctx, "non_exist") + re.NoError(err) + re.Equal("Delete service GC safepoint successfully.", msg) +} diff --git a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go index f4a6b6fcfd0..9a07d92937f 100644 --- a/tools/pd-ctl/pdctl/command/gc_safepoint_command.go +++ b/tools/pd-ctl/pdctl/command/gc_safepoint_command.go @@ -15,24 +15,18 @@ package command import ( - "encoding/json" - "net/http" "sort" "github.com/spf13/cobra" - 
"github.com/tikv/pd/server/api" -) - -var ( - serviceGCSafepointPrefix = "pd/api/v1/gc/safepoint" ) // NewServiceGCSafepointCommand return a service gc safepoint subcommand of rootCmd func NewServiceGCSafepointCommand() *cobra.Command { l := &cobra.Command{ - Use: "service-gc-safepoint", - Short: "show all service gc safepoint", - Run: showSSPs, + Use: "service-gc-safepoint", + Short: "show all service gc safepoint", + PersistentPreRunE: requirePDClient, + Run: showSSPs, } l.AddCommand(NewDeleteServiceGCSafepointCommand()) return l @@ -50,25 +44,15 @@ func NewDeleteServiceGCSafepointCommand() *cobra.Command { } func showSSPs(cmd *cobra.Command, _ []string) { - r, err := doRequest(cmd, serviceGCSafepointPrefix, http.MethodGet, http.Header{}) + safepoint, err := PDCli.GetGCSafePoint(cmd.Context()) if err != nil { cmd.Printf("Failed to get service GC safepoint: %s\n", err) return } - var safepoint api.ListServiceGCSafepoint - if err := json.Unmarshal([]byte(r), &safepoint); err != nil { - cmd.Printf("Failed to unmarshal service GC safepoint: %s\n", err) - return - } sort.Slice(safepoint.ServiceGCSafepoints, func(i, j int) bool { return safepoint.ServiceGCSafepoints[i].SafePoint < safepoint.ServiceGCSafepoints[j].SafePoint }) - data, err := json.MarshalIndent(safepoint, "", " ") - if err != nil { - cmd.Printf("Failed to marshal service GC safepoint: %s\n", err) - return - } - cmd.Println(string(data)) + jsonPrint(cmd, safepoint) } func deleteSSP(cmd *cobra.Command, args []string) { @@ -76,12 +60,10 @@ func deleteSSP(cmd *cobra.Command, args []string) { cmd.Usage() return } - serviceID := args[0] - deleteURL := serviceGCSafepointPrefix + "/" + serviceID - r, err := doRequest(cmd, deleteURL, http.MethodDelete, http.Header{}) + r, err := PDCli.DeleteGCSafePoint(cmd.Context(), args[0]) if err != nil { cmd.Printf("Failed to delete service GC safepoint: %s\n", err) return } - cmd.Println(r) + jsonPrint(cmd, r) } diff --git a/tools/pd-ctl/tests/safepoint/safepoint_test.go b/tools/pd-ctl/tests/safepoint/safepoint_test.go new file mode 100644 index 00000000000..5551cce1fff --- /dev/null +++ b/tools/pd-ctl/tests/safepoint/safepoint_test.go @@ -0,0 +1,138 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package safepoint_test
+
+import (
+	"context"
+	"encoding/json"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/tikv/pd/pkg/storage/endpoint"
+	"github.com/tikv/pd/server/api"
+	pdTests "github.com/tikv/pd/tests"
+	ctl "github.com/tikv/pd/tools/pd-ctl/pdctl"
+	"github.com/tikv/pd/tools/pd-ctl/tests"
+)
+
+func TestSafepoint(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	tc, err := pdTests.NewTestCluster(ctx, 3)
+	re.NoError(err)
+	defer tc.Destroy()
+	err = tc.RunInitialServers()
+	re.NoError(err)
+	tc.WaitLeader()
+	leaderServer := tc.GetLeaderServer()
+	re.NoError(leaderServer.BootstrapCluster())
+	pdAddr := tc.GetConfig().GetClientURL()
+	cmd := ctl.GetRootCmd()
+
+	// add some gc safepoints to the server
+	list := &api.ListServiceGCSafepoint{
+		ServiceGCSafepoints: []*endpoint.ServiceSafePoint{
+			{
+				ServiceID: "AAA",
+				ExpiredAt: time.Now().Unix() + 10,
+				SafePoint: 1,
+			},
+			{
+				ServiceID: "BBB",
+				ExpiredAt: time.Now().Unix() + 10,
+				SafePoint: 2,
+			},
+			{
+				ServiceID: "CCC",
+				ExpiredAt: time.Now().Unix() + 10,
+				SafePoint: 3,
+			},
+		},
+		GCSafePoint:           1,
+		MinServiceGcSafepoint: 1,
+	}
+
+	storage := leaderServer.GetServer().GetStorage()
+	for _, ssp := range list.ServiceGCSafepoints {
+		err := storage.SaveServiceGCSafePoint(ssp)
+		re.NoError(err)
+	}
+	storage.SaveGCSafePoint(1)
+
+	// get the safepoints
+	args := []string{"-u", pdAddr, "service-gc-safepoint"}
+	output, err := tests.ExecuteCommand(cmd, args...)
+	re.NoError(err)
+
+	// create a container to hold the received values
+	var l api.ListServiceGCSafepoint
+	re.NoError(json.Unmarshal(output, &l))
+
+	// test if the points are what we expected
+	re.Equal(uint64(1), l.GCSafePoint)
+	re.Equal(uint64(1), l.MinServiceGcSafepoint)
+	re.Len(l.ServiceGCSafepoints, 3)
+
+	// sort the gc safepoints by ServiceID
+	sort.Slice(l.ServiceGCSafepoints, func(i, j int) bool {
+		return l.ServiceGCSafepoints[i].ServiceID < l.ServiceGCSafepoints[j].ServiceID
+	})
+
+	for i, val := range l.ServiceGCSafepoints {
+		re.Equal(list.ServiceGCSafepoints[i].ServiceID, val.ServiceID)
+		re.Equal(list.ServiceGCSafepoints[i].SafePoint, val.SafePoint)
+	}
+
+	// delete the safepoints
+	for i := 0; i < 3; i++ {
+		args = []string{"-u", pdAddr, "service-gc-safepoint", "delete", list.ServiceGCSafepoints[i].ServiceID}
+		output, err = tests.ExecuteCommand(cmd, args...)
+		re.NoError(err)
+		var msg string
+		re.NoError(json.Unmarshal(output, &msg))
+		re.Equal("Delete service GC safepoint successfully.", msg)
+	}
+
+	// get the safepoints again to ensure they are indeed deleted
+	args = []string{"-u", pdAddr, "service-gc-safepoint"}
+	output, err = tests.ExecuteCommand(cmd, args...)
+	re.NoError(err)
+
+	var ll api.ListServiceGCSafepoint
+	re.NoError(json.Unmarshal(output, &ll))
+
+	re.Equal(uint64(1), ll.GCSafePoint)
+	re.Equal(uint64(0), ll.MinServiceGcSafepoint)
+	re.Empty(ll.ServiceGCSafepoints)
+
+	// trying to delete "gc_worker" should return an error message
+	args = []string{"-u", pdAddr, "service-gc-safepoint", "delete", "gc_worker"}
+	output, err = tests.ExecuteCommand(cmd, args...)
+	re.NoError(err)
+
+	// the output should be an error message
+	re.Equal("Failed to delete service GC safepoint: request pd http api failed with status: '500 Internal Server Error', body: '\"cannot remove service safe point of gc_worker\"'\n", string(output))
+
+	// deleting a non-existent safepoint should return normally
+	args = []string{"-u", pdAddr, "service-gc-safepoint", "delete", "non_exist"}
+	output, err = tests.ExecuteCommand(cmd, args...)
+	re.NoError(err)
+	var msg string
+	re.NoError(json.Unmarshal(output, &msg))
+	re.Equal("Delete service GC safepoint successfully.", msg)
+}

From cbb5a5bfd549d5a80891ce88e40aba3d499f3422 Mon Sep 17 00:00:00 2001
From: Hu#
Date: Wed, 21 Aug 2024 15:45:12 +0800
Subject: [PATCH 09/10] ms: change the default name of the microservice to lowercase (#8551)

close tikv/pd#8550

ms: change the default name of the microservice to lowercase

Signed-off-by: husharp
---
 pkg/mcs/resourcemanager/server/config.go   | 2 +-
 pkg/mcs/scheduling/server/config/config.go | 2 +-
 pkg/mcs/tso/server/config.go               | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/mcs/resourcemanager/server/config.go b/pkg/mcs/resourcemanager/server/config.go
index 9a899c9dc07..47f07486c1d 100644
--- a/pkg/mcs/resourcemanager/server/config.go
+++ b/pkg/mcs/resourcemanager/server/config.go
@@ -35,7 +35,7 @@ import (
 )
 
 const (
-	defaultName             = "Resource Manager"
+	defaultName             = "resource manager"
 	defaultBackendEndpoints = "http://127.0.0.1:2379"
 	defaultListenAddr       = "http://127.0.0.1:3379"
 
diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go
index 4d7cee91f4a..cca2651d99e 100644
--- a/pkg/mcs/scheduling/server/config/config.go
+++ b/pkg/mcs/scheduling/server/config/config.go
@@ -47,7 +47,7 @@ import (
 )
 
 const (
-	defaultName             = "Scheduling"
+	defaultName             = "scheduling"
 	defaultBackendEndpoints = "http://127.0.0.1:2379"
 	defaultListenAddr       = "http://127.0.0.1:3379"
 )
diff --git a/pkg/mcs/tso/server/config.go b/pkg/mcs/tso/server/config.go
index 8a3fe1ca161..209a9deb949 100644
--- a/pkg/mcs/tso/server/config.go
+++ b/pkg/mcs/tso/server/config.go
@@ -37,7 +37,7 @@ const (
 	defaultMaxResetTSGap = 24 * time.Hour
 
-	defaultName             = "TSO"
+	defaultName             = "tso"
 	defaultBackendEndpoints = "http://127.0.0.1:2379"
 	defaultListenAddr       = "http://127.0.0.1:3379"
 

From b132ea64d4f5fb5a53fa3fa18e5ed7c796e4d4ae Mon Sep 17 00:00:00 2001
From: lhy1024
Date: Thu, 22 Aug 2024 10:58:44 +0800
Subject: [PATCH 10/10] api: remove duplicate `/debug/pprof/trace` register (#8554)

close tikv/pd#8553

Signed-off-by: lhy1024
---
 server/util.go | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/server/util.go b/server/util.go
index 83455e2a6fe..b80a07ab28a 100644
--- a/server/util.go
+++ b/server/util.go
@@ -17,7 +17,6 @@ package server
 import (
 	"context"
 	"net/http"
-	"net/http/pprof"
 	"path/filepath"
 	"strings"
 
@@ -133,10 +132,6 @@ func combineBuilderServerHTTPService(ctx context.Context, svr *Server, serviceBu
 	apiService.UseHandler(router)
 
 	userHandlers[pdAPIPrefix] = apiService
-
-	// fix issue https://github.com/tikv/pd/issues/7253
-	// FIXME: remove me after upgrade
-	userHandlers["/debug/pprof/trace"] = http.HandlerFunc(pprof.Trace)
 	return userHandlers, nil
 }
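Note: the pd-ctl changes above replace raw HTTP requests with the PD HTTP client's GetGCSafePoint and DeleteGCSafePoint methods, the same calls exercised in the new tests. Below is a minimal standalone sketch of that flow, for illustration only and not part of these patches: the pdhttp.NewClient constructor arguments, the "gc-safepoint-example" source name, and the "some_service" service ID are assumptions; only the two safepoint methods and the fields they return are taken from the test code above.

package main

import (
	"context"
	"fmt"
	"log"

	pdhttp "github.com/tikv/pd/client/http"
)

func main() {
	// Assumed constructor usage: a caller name plus the PD endpoints.
	cli := pdhttp.NewClient("gc-safepoint-example", []string{"http://127.0.0.1:2379"})
	defer cli.Close()

	ctx := context.Background()

	// Same call the reworked `pd-ctl service-gc-safepoint` command makes.
	sp, err := cli.GetGCSafePoint(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("gc safepoint:", sp.GCSafePoint)
	for _, ssp := range sp.ServiceGCSafepoints {
		fmt.Println("service:", ssp.ServiceID, "safepoint:", ssp.SafePoint)
	}

	// Same call the reworked `service-gc-safepoint delete <service-id>` command makes.
	msg, err := cli.DeleteGCSafePoint(ctx, "some_service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg)
}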