Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 36 additions & 11 deletions pkg/schedule/schedulers/balance_range.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,27 +90,52 @@ func (handler *balanceRangeSchedulerHandler) addJob(w http.ResponseWriter, r *ht
Status: pending,
Timeout: defaultJobTimeout,
}
job.Engine = input["engine"].(string)
engine, ok := input["engine"].(string)
if !ok || len(engine) == 0 {
handler.rd.JSON(w, http.StatusBadRequest, "engine is required and must be a string")
return
}
job.Engine = engine
if job.Engine != core.EngineTiFlash && job.Engine != core.EngineTiKV {
handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("engine:%s must be tikv or tiflash", input["engine"].(string)))
handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("engine:%s must be tikv or tiflash", job.Engine))
return
}
job.Rule = core.NewRule(input["rule"].(string))
ruleStr, ok := input["rule"].(string)
if !ok || len(ruleStr) == 0 {
handler.rd.JSON(w, http.StatusBadRequest, "rule is required and must be a string")
return
}
job.Rule = core.NewRule(ruleStr)
if job.Rule != core.LeaderScatter && job.Rule != core.PeerScatter && job.Rule != core.LearnerScatter {
handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("rule:%s must be leader-scatter, learner-scatter or peer-scatter",
input["engine"].(string)))
ruleStr))
return
}

job.Alias = input["alias"].(string)
timeoutStr, ok := input["timeout"].(string)
if ok && len(timeoutStr) > 0 {
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("timeout:%s is invalid", input["timeout"].(string)))
alias, ok := input["alias"].(string)
if !ok || len(alias) == 0 {
handler.rd.JSON(w, http.StatusBadRequest, "alias is required and must be a string")
return
}
job.Alias = alias
if timeoutVal, exists := input["timeout"]; exists {
timeoutStr, ok := timeoutVal.(string)
if !ok {
handler.rd.JSON(w, http.StatusBadRequest, "timeout must be a string")
return
}
job.Timeout = timeout
if len(timeoutStr) > 0 {
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("timeout:%s is invalid", timeoutStr))
return
}
if timeout <= 0 {
handler.rd.JSON(w, http.StatusBadRequest, "timeout must be positive")
return
}
job.Timeout = timeout
}
}
Comment on lines +121 to 139
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Consider validating that timeout is positive.

time.ParseDuration accepts negative durations (e.g., "-5m"). A negative timeout would cause shouldFinished() to return true immediately, finishing the job without performing any work. Add a check to reject non-positive timeouts.

🛡️ Suggested fix
 	if len(timeoutStr) > 0 {
 		timeout, err := time.ParseDuration(timeoutStr)
 		if err != nil {
 			handler.rd.JSON(w, http.StatusBadRequest, fmt.Sprintf("timeout:%s is invalid", timeoutStr))
 			return
 		}
+		if timeout <= 0 {
+			handler.rd.JSON(w, http.StatusBadRequest, "timeout must be positive")
+			return
+		}
 		job.Timeout = timeout
 	}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/schedule/schedulers/balance_range.go` around lines 121 - 135, The parsed
timeout can be negative which causes shouldFinished() to immediately return
true; after parsing timeoutStr with time.ParseDuration in the block surrounding
timeoutStr and err, add a validation that the resulting timeout is strictly
positive (e.g., timeout > 0) and if not, return a 400 via handler.rd.JSON with a
clear message (e.g., "timeout must be positive") and do not assign job.Timeout;
keep existing handling of empty string and parse errors intact and reference
timeoutStr, time.ParseDuration, job.Timeout and shouldFinished() when locating
the change.


keys, err := keyutil.DecodeHTTPKeyRanges(input)
Expand Down
33 changes: 33 additions & 0 deletions pkg/schedule/schedulers/balance_range_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,17 @@
package schedulers

import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"

"github.com/stretchr/testify/require"
"github.com/unrolled/render"

"github.com/pingcap/failpoint"

Expand Down Expand Up @@ -516,3 +521,31 @@ func TestPersistFail(t *testing.T) {
re.ErrorContains(conf.gcLocked(), errMsg)
re.Len(conf.jobs, 1)
}

// TestAddBalanceRangeJobWithInvalidFieldType verifies that addJob rejects a
// request whose "engine" field is not a string: the handler must respond with
// HTTP 400 rather than panic, and must not persist any job.
func TestAddBalanceRangeJobWithInvalidFieldType(t *testing.T) {
	re := require.New(t)

	cfg := &balanceRangeSchedulerConfig{
		schedulerConfig: &baseSchedulerConfig{},
		jobs:            []*balanceRangeSchedulerJob{},
	}
	cfg.init("test", storage.NewStorageWithMemoryBackend(), cfg)

	h := &balanceRangeSchedulerHandler{
		config: cfg,
		rd:     render.New(render.Options{IndentJSON: true}),
	}

	// "engine" is deliberately a number instead of a string.
	body, err := json.Marshal(map[string]any{
		"alias":     "a",
		"engine":    1,
		"rule":      "leader-scatter",
		"start-key": "100",
		"end-key":   "200",
	})
	re.NoError(err)

	rec := httptest.NewRecorder()
	re.NotPanics(func() {
		h.addJob(rec, httptest.NewRequest(http.MethodPut, "/job", bytes.NewReader(body)))
	})
	re.Equal(http.StatusBadRequest, rec.Code)
	re.Empty(cfg.jobs)
}
33 changes: 27 additions & 6 deletions pkg/schedule/schedulers/evict_leader.go
Original file line number Diff line number Diff line change
Expand Up @@ -173,10 +173,11 @@ func (conf *evictLeaderSchedulerConfig) resumeLeaderTransfer(cluster sche.Schedu
func (conf *evictLeaderSchedulerConfig) pauseLeaderTransferIfStoreNotExist(id uint64) (bool, error) {
conf.RLock()
defer conf.RUnlock()
if _, exist := conf.StoreIDWithRanges[id]; !exist {
if err := conf.cluster.PauseLeaderTransfer(id, constant.In); err != nil {
return exist, err
}
if _, exist := conf.StoreIDWithRanges[id]; exist {
return true, nil
}
if err := conf.cluster.PauseLeaderTransfer(id, constant.In); err != nil {
return false, err
}
return true, nil
}
Expand Down Expand Up @@ -427,8 +428,28 @@ func (handler *evictLeaderHandler) updateConfig(w http.ResponseWriter, r *http.R
batch = (int)(batchFloat)
}

ranges, ok := (input["ranges"]).([]string)
if ok {
var ranges []string
rangesVal, hasRanges := input["ranges"]
if hasRanges {
switch val := rangesVal.(type) {
case []string:
ranges = val
case []any:
ranges = make([]string, 0, len(val))
for _, item := range val {
s, ok := item.(string)
if !ok {
handler.config.resumeLeaderTransferIfExist(id)
handler.rd.JSON(w, http.StatusBadRequest, errs.ErrSchedulerConfig.FastGenByArgs("ranges"))
return
}
ranges = append(ranges, s)
}
default:
handler.config.resumeLeaderTransferIfExist(id)
handler.rd.JSON(w, http.StatusBadRequest, errs.ErrSchedulerConfig.FastGenByArgs("ranges"))
return
}
if !inputHasStoreID {
handler.config.resumeLeaderTransferIfExist(id)
handler.rd.JSON(w, http.StatusBadRequest, errs.ErrSchedulerConfig.FastGenByArgs("id"))
Expand Down
59 changes: 59 additions & 0 deletions pkg/schedule/schedulers/evict_leader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,12 @@ package schedulers
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"

"github.com/stretchr/testify/require"
"github.com/unrolled/render"

"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
Expand Down Expand Up @@ -251,3 +254,59 @@ func TestEvictLeaderDeleteWithSaveFailure(t *testing.T) {
re.Equal(keyRanges, conf.StoreIDWithRanges[1], "key ranges should be restored")
re.Empty(resp)
}

// TestEvictLeaderUpdateConfigWithStringArrayRanges checks that updateConfig
// accepts a "ranges" field supplied as a []string, responds with HTTP 200,
// and records the decoded key range for the target store.
func TestEvictLeaderUpdateConfigWithStringArrayRanges(t *testing.T) {
	re := require.New(t)
	cancel, _, tc, _ := prepareSchedulersTest()
	defer cancel()

	tc.AddLeaderStore(1, 0)
	cfg := &evictLeaderSchedulerConfig{
		schedulerConfig:   &baseSchedulerConfig{},
		StoreIDWithRanges: map[uint64][]keyutil.KeyRange{},
		Batch:             EvictLeaderBatchSize,
		cluster:           tc.GetBasicCluster(),
	}
	cfg.init("evict-leader-test", storage.NewStorageWithMemoryBackend(), cfg)
	h := &evictLeaderHandler{config: cfg, rd: render.New(render.Options{IndentJSON: true})}

	body, err := json.Marshal(map[string]any{
		"store_id": 1,
		"ranges":   []string{"100", "200"},
	})
	re.NoError(err)

	rec := httptest.NewRecorder()
	re.NotPanics(func() {
		h.updateConfig(rec, httptest.NewRequest(http.MethodPost, "/config", bytes.NewReader(body)))
	})
	re.Equal(http.StatusOK, rec.Code)
	re.Len(cfg.StoreIDWithRanges[1], 1)
	re.Equal(keyutil.NewKeyRange("100", "200"), cfg.StoreIDWithRanges[1][0])
}

// TestEvictLeaderUpdateConfigWithInvalidRangesType checks that updateConfig
// rejects a "ranges" array containing a non-string element with HTTP 400
// instead of panicking on the type assertion.
func TestEvictLeaderUpdateConfigWithInvalidRangesType(t *testing.T) {
	re := require.New(t)
	cancel, _, tc, _ := prepareSchedulersTest()
	defer cancel()

	tc.AddLeaderStore(1, 0)
	cfg := &evictLeaderSchedulerConfig{
		schedulerConfig:   &baseSchedulerConfig{},
		StoreIDWithRanges: map[uint64][]keyutil.KeyRange{},
		Batch:             EvictLeaderBatchSize,
		cluster:           tc.GetBasicCluster(),
	}
	cfg.init("evict-leader-test", storage.NewStorageWithMemoryBackend(), cfg)
	h := &evictLeaderHandler{config: cfg, rd: render.New(render.Options{IndentJSON: true})}

	// The second range element is an int, which must be rejected.
	body, err := json.Marshal(map[string]any{
		"store_id": 1,
		"ranges":   []any{"100", 200},
	})
	re.NoError(err)

	rec := httptest.NewRecorder()
	re.NotPanics(func() {
		h.updateConfig(rec, httptest.NewRequest(http.MethodPost, "/config", bytes.NewReader(body)))
	})
	re.Equal(http.StatusBadRequest, rec.Code)
}
7 changes: 6 additions & 1 deletion pkg/schedule/schedulers/grant_hot_region.go
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,12 @@ func (handler *grantHotRegionHandler) updateConfig(w http.ResponseWriter, r *htt
}
storeIDs = append(storeIDs, id)
}
leaderID, err := strconv.ParseUint(input["store-leader-id"].(string), 10, 64)
leaderStr, ok := input["store-leader-id"].(string)
if !ok {
handler.rd.JSON(w, http.StatusBadRequest, errs.ErrSchedulerConfig)
return
}
leaderID, err := strconv.ParseUint(leaderStr, 10, 64)
if err != nil {
handler.rd.JSON(w, http.StatusBadRequest, errs.ErrBytesToUint64)
return
Expand Down
45 changes: 45 additions & 0 deletions pkg/schedule/schedulers/grant_hot_region_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
// Copyright 2026 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package schedulers

import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"

"github.com/stretchr/testify/require"
"github.com/unrolled/render"
)

// TestGrantHotRegionUpdateConfigWithInvalidLeaderIDType verifies that
// updateConfig responds with HTTP 400 instead of panicking when the
// "store-leader-id" field is not a string.
func TestGrantHotRegionUpdateConfigWithInvalidLeaderIDType(t *testing.T) {
	re := require.New(t)
	h := &grantHotRegionHandler{
		config: &grantHotRegionSchedulerConfig{},
		rd:     render.New(render.Options{IndentJSON: true}),
	}

	// "store-leader-id" is deliberately a number, not a string.
	body, err := json.Marshal(map[string]any{
		"store-id":        "1,2",
		"store-leader-id": 1,
	})
	re.NoError(err)

	rec := httptest.NewRecorder()
	re.NotPanics(func() {
		h.updateConfig(rec, httptest.NewRequest(http.MethodPost, "/config", bytes.NewReader(body)))
	})
	re.Equal(http.StatusBadRequest, rec.Code)
}
7 changes: 6 additions & 1 deletion pkg/schedule/schedulers/scheduler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -429,8 +429,13 @@ func (c *Controller) CheckTransferWitnessLeader(region *core.RegionInfo) {
s, ok := c.schedulers[types.TransferWitnessLeaderScheduler.String()]
c.RUnlock()
if ok {
regionC := RecvRegionInfo(s.Scheduler)
if regionC == nil {
log.Warn("invalid scheduler type for transfer witness leader", zap.String("scheduler", s.GetName()))
return
}
select {
case RecvRegionInfo(s.Scheduler) <- region:
case regionC <- region:
default:
log.Warn("drop transfer witness leader due to recv region channel full", zap.Uint64("region-id", region.GetID()))
}
Expand Down
5 changes: 4 additions & 1 deletion pkg/schedule/schedulers/transfer_witness_leader.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,5 +114,8 @@ func scheduleTransferWitnessLeader(name string, cluster sche.SchedulerCluster, r

// RecvRegionInfo receives a checked region from coordinator
func RecvRegionInfo(s Scheduler) chan<- *core.RegionInfo {
return s.(*transferWitnessLeaderScheduler).regions
if scheduler, ok := s.(*transferWitnessLeaderScheduler); ok {
return scheduler.regions
}
return nil
}
8 changes: 8 additions & 0 deletions pkg/schedule/schedulers/transfer_witness_leader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,3 +89,11 @@ func TestTransferWitnessLeaderWithUnhealthyPeer(t *testing.T) {
ops, _ = sl.Schedule(tc, false)
re.Empty(ops)
}

// TestRecvRegionInfoWithWrongSchedulerType ensures RecvRegionInfo yields a
// nil channel (rather than panicking) when handed a scheduler that is not a
// *transferWitnessLeaderScheduler.
func TestRecvRegionInfoWithWrongSchedulerType(t *testing.T) {
	re := require.New(t)
	var sched Scheduler = &balanceRangeScheduler{}
	re.NotPanics(func() {
		re.Nil(RecvRegionInfo(sched))
	})
}
26 changes: 19 additions & 7 deletions server/api/scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,15 +125,19 @@ func (h *schedulerHandler) CreateScheduler(w http.ResponseWriter, r *http.Reques
handler.ServeHTTP(w, r)
return
}
h.r.JSON(w, http.StatusNotAcceptable, err.Error())
if err != nil {
h.r.JSON(w, http.StatusNotAcceptable, err.Error())
return
}
h.r.JSON(w, http.StatusNotAcceptable, "scheduler config handler is unavailable")
return
}
if err := apiutil.CollectStringOption("rule", input, collector); err != nil {
h.r.JSON(w, http.StatusInternalServerError, err.Error())
h.r.JSON(w, http.StatusBadRequest, err.Error())
return
}
if err := apiutil.CollectStringOption("engine", input, collector); err != nil {
h.r.JSON(w, http.StatusInternalServerError, err.Error())
h.r.JSON(w, http.StatusBadRequest, err.Error())
return
}

Expand All @@ -142,13 +146,13 @@ func (h *schedulerHandler) CreateScheduler(w http.ResponseWriter, r *http.Reques
if errors.ErrorEqual(err, errs.ErrOptionNotExist) {
collector(defaultTimeout)
} else {
h.r.JSON(w, http.StatusInternalServerError, err.Error())
h.r.JSON(w, http.StatusBadRequest, err.Error())
return
}
}

if err := apiutil.CollectStringOption("alias", input, collector); err != nil {
h.r.JSON(w, http.StatusInternalServerError, err.Error())
h.r.JSON(w, http.StatusBadRequest, err.Error())
return
}

Expand Down Expand Up @@ -298,7 +302,11 @@ func (h *schedulerHandler) redirectSchedulerDelete(w http.ResponseWriter, name,
}
resp, err := apiutil.DoDelete(h.svr.GetHTTPClient(), deleteURL)
if err != nil {
h.r.JSON(w, resp.StatusCode, err.Error())
status := http.StatusInternalServerError
if resp != nil {
status = resp.StatusCode
}
h.r.JSON(w, status, err.Error())
return
}
defer resp.Body.Close()
Expand Down Expand Up @@ -373,5 +381,9 @@ func (h *schedulerConfigHandler) handleSchedulerConfig(w http.ResponseWriter, r
sh.ServeHTTP(w, r)
return
}
h.rd.JSON(w, http.StatusNotAcceptable, err.Error())
if err != nil {
h.rd.JSON(w, http.StatusNotAcceptable, err.Error())
return
}
h.rd.JSON(w, http.StatusNotAcceptable, "scheduler config handler is unavailable")
}
Loading