In Job.Scale, ensure that new count is within [min,max] configured in scaling policy #9761

Merged: 4 commits, Jan 8, 2021
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -14,6 +14,7 @@ BUG FIXES:
* consul/connect: Fixed a bug where in-place upgrade of Nomad client running Connect enabled jobs would panic [[GH-9738](https://github.com/hashicorp/nomad/issues/9738)]
* template: Fixed multiple issues in template src/dest and artifact dest interpolation [[GH-9671](https://github.com/hashicorp/nomad/issues/9671)]
* template: Fixed a bug where dynamic secrets did not trigger the template `change_mode` after a client restart. [[GH-9636](https://github.com/hashicorp/nomad/issues/9636)]
* scaling: Fixed a bug where job scaling endpoint did not enforce scaling policy min/max [[GH-9761](https://github.com/hashicorp/nomad/issues/9761)]
* server: Fixed a bug where new servers may bootstrap prematurely when configured with `bootstrap_expect = 0`.

## 1.0.1 (December 16, 2020)
2 changes: 1 addition & 1 deletion api/util_test.go
@@ -53,7 +53,7 @@ func testJobWithScalingPolicy() *Job {
	job.TaskGroups[0].Scaling = &ScalingPolicy{
		Policy:  map[string]interface{}{},
		Min:     int64ToPtr(1),
-		Max:     int64ToPtr(1),
+		Max:     int64ToPtr(5),
Comment from the PR author:
Fixing the bug meant that a test which had previously (and incorrectly) passed started to fail; widening Max here makes room for that test to succeed properly.

		Enabled: boolToPtr(true),
	}
	return job
14 changes: 14 additions & 0 deletions nomad/job_endpoint.go
@@ -1039,6 +1039,20 @@ func (j *Job) Scale(args *structs.JobScaleRequest, reply *structs.JobRegisterRes
	prevCount := found.Count
	if args.Count != nil {

		// if there is a scaling policy, check that the new count is within bounds
		if found.Scaling != nil {
			if *args.Count < found.Scaling.Min {
				return structs.NewErrRPCCoded(400,
					fmt.Sprintf("group count was less than scaling policy minimum: %d < %d",
						*args.Count, found.Scaling.Min))
			}
			if found.Scaling.Max < *args.Count {
				return structs.NewErrRPCCoded(400,
					fmt.Sprintf("group count was greater than scaling policy maximum: %d > %d",
						*args.Count, found.Scaling.Max))
			}
		}

		// Lookup the latest deployment, to see whether this scaling event should be blocked
		d, err := snap.LatestDeploymentByJobID(ws, namespace, args.JobID)
		if err != nil {
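The added validation reduces to a simple range check against the group's scaling policy. Below is a minimal, self-contained sketch of that check; `scalingPolicy` and `validateCount` are illustrative stand-ins, not Nomad's actual structs or helpers, and the real handler above wraps the error in an RPC-coded 400 response.

```go
package main

import "fmt"

// scalingPolicy is a simplified stand-in for a task group's scaling policy
// bounds; the real policy carries more fields than just Min and Max.
type scalingPolicy struct {
	Min int64
	Max int64
}

// validateCount mirrors the check added to Job.Scale: a requested count must
// fall within the policy's [Min, Max] range, and a missing policy means there
// is nothing to enforce.
func validateCount(count int64, policy *scalingPolicy) error {
	if policy == nil {
		return nil // no scaling policy configured
	}
	if count < policy.Min {
		return fmt.Errorf("group count was less than scaling policy minimum: %d < %d", count, policy.Min)
	}
	if policy.Max < count {
		return fmt.Errorf("group count was greater than scaling policy maximum: %d > %d", count, policy.Max)
	}
	return nil
}

func main() {
	pol := &scalingPolicy{Min: 3, Max: 10}
	for _, count := range []int64{2, 5, 11} {
		if err := validateCount(count, pol); err != nil {
			fmt.Println("rejected:", err)
		} else {
			fmt.Println("accepted count:", count)
		}
	}
}
```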
43 changes: 43 additions & 0 deletions nomad/job_endpoint_test.go
@@ -6705,6 +6705,49 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) {
	require.Contains(err.Error(), "should not contain count if error is true")
}

func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()

	job, pol := mock.JobWithScalingPolicy()
	pol.Min = 3
	pol.Max = 10
	job.TaskGroups[0].Count = 5

	// register the job
	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
	require.Nil(err)

	var resp structs.JobRegisterResponse
	scale := &structs.JobScaleRequest{
		JobID: job.ID,
		Target: map[string]string{
			structs.ScalingTargetGroup: job.TaskGroups[0].Name,
		},
		Count:          helper.Int64ToPtr(pol.Max + 1),
		Message:        "out of bounds",
		PolicyOverride: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
	require.Error(err)
	require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10")

	scale.Count = helper.Int64ToPtr(2)
	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
	require.Error(err)
	require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3")
}

func TestJobEndpoint_Scale_NoEval(t *testing.T) {
	t.Parallel()
	require := require.New(t)
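For context on how the new enforcement surfaces to callers, here is a rough sketch of an out-of-bounds scale request made against a local agent over HTTP. The endpoint path, payload fields, and the job/group names are assumptions for illustration (modeled on the request type exercised by the test above), not taken from this diff; with the fix in place the server should answer with the 400-coded error rather than accepting the count.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask for 11 instances of a group whose scaling policy caps Max at 10.
	payload := map[string]interface{}{
		"Count":   11,
		"Target":  map[string]string{"Group": "web"}, // hypothetical group name
		"Message": "manual scale-up",
	}
	body, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}

	// Assumed endpoint shape: POST /v1/job/:job_id/scale on a local agent.
	resp, err := http.Post("http://127.0.0.1:4646/v1/job/example/scale",
		"application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	msg, _ := io.ReadAll(resp.Body)
	// Expected with this change: a 400 status and a message like
	// "group count was greater than scaling policy maximum: 11 > 10".
	fmt.Println(resp.StatusCode, string(msg))
}
```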