diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8401d8e3e0ff..5e26e66058da 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ BUG FIXES:
  * consul/connect: Fixed a bug where in-place upgrade of Nomad client running Connect enabled jobs would panic [[GH-9738](https://github.com/hashicorp/nomad/issues/9738)]
  * template: Fixed multiple issues in template src/dest and artifact dest interpolation [[GH-9671](https://github.com/hashicorp/nomad/issues/9671)]
  * template: Fixed a bug where dynamic secrets did not trigger the template `change_mode` after a client restart. [[GH-9636](https://github.com/hashicorp/nomad/issues/9636)]
+ * scaling: Fixed a bug where job scaling endpoint did not enforce scaling policy min/max [[GH-9761](https://github.com/hashicorp/nomad/issues/9761)]
  * server: Fixed a bug where new servers may bootstrap prematurely when configured with `bootstrap_expect = 0`.
 
 ## 1.0.1 (December 16, 2020)
diff --git a/api/util_test.go b/api/util_test.go
index 5c0fd524f715..7e5f2e1b5754 100644
--- a/api/util_test.go
+++ b/api/util_test.go
@@ -53,7 +53,7 @@ func testJobWithScalingPolicy() *Job {
 	job.TaskGroups[0].Scaling = &ScalingPolicy{
 		Policy:  map[string]interface{}{},
 		Min:     int64ToPtr(1),
-		Max:     int64ToPtr(1),
+		Max:     int64ToPtr(5),
 		Enabled: boolToPtr(true),
 	}
 	return job
diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go
index 379c759d4c45..333071ad6afc 100644
--- a/nomad/job_endpoint.go
+++ b/nomad/job_endpoint.go
@@ -1039,6 +1039,20 @@ func (j *Job) Scale(args *structs.JobScaleRequest, reply *structs.JobRegisterRes
 	prevCount := found.Count
 	if args.Count != nil {
 
+		// if there is a scaling policy, check that the new count is within bounds
+		if found.Scaling != nil {
+			if *args.Count < found.Scaling.Min {
+				return structs.NewErrRPCCoded(400,
+					fmt.Sprintf("group count was less than scaling policy minimum: %d < %d",
+						*args.Count, found.Scaling.Min))
+			}
+			if found.Scaling.Max < *args.Count {
+				return structs.NewErrRPCCoded(400,
+					fmt.Sprintf("group count was greater than scaling policy maximum: %d > %d",
+						*args.Count, found.Scaling.Max))
+			}
+		}
+
 		// Lookup the latest deployment, to see whether this scaling event should be blocked
 		d, err := snap.LatestDeploymentByJobID(ws, namespace, args.JobID)
 		if err != nil {
diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go
index 684557060f63..62bad2c33eb6 100644
--- a/nomad/job_endpoint_test.go
+++ b/nomad/job_endpoint_test.go
@@ -6705,6 +6705,49 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) {
 	require.Contains(err.Error(), "should not contain count if error is true")
 }
 
+func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) {
+	t.Parallel()
+	require := require.New(t)
+
+	s1, cleanupS1 := TestServer(t, nil)
+	defer cleanupS1()
+	codec := rpcClient(t, s1)
+	testutil.WaitForLeader(t, s1.RPC)
+	state := s1.fsm.State()
+
+	job, pol := mock.JobWithScalingPolicy()
+	pol.Min = 3
+	pol.Max = 10
+	job.TaskGroups[0].Count = 5
+
+	// register the job
+	err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
+	require.Nil(err)
+
+	var resp structs.JobRegisterResponse
+	scale := &structs.JobScaleRequest{
+		JobID: job.ID,
+		Target: map[string]string{
+			structs.ScalingTargetGroup: job.TaskGroups[0].Name,
+		},
+		Count:          helper.Int64ToPtr(pol.Max + 1),
+		Message:        "out of bounds",
+		PolicyOverride: false,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+	}
+	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
+	require.Error(err)
+	require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10")
+
+	scale.Count = helper.Int64ToPtr(2)
+	err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
+	require.Error(err)
+	require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3")
+}
+
 func TestJobEndpoint_Scale_NoEval(t *testing.T) {
 	t.Parallel()
 	require := require.New(t)