Remove deprecated code #5894

Merged 9 commits on Jul 2, 2019
Changes from 7 commits
28 changes: 2 additions & 26 deletions command/agent/node_endpoint.go
@@ -2,9 +2,7 @@ package agent

import (
"net/http"
"strconv"
"strings"
"time"

"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
@@ -108,30 +106,8 @@ func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request

var drainRequest api.NodeUpdateDrainRequest

// COMPAT: Remove in 0.9. Allow the old style enable query param.
// Get the enable parameter
enableRaw := req.URL.Query().Get("enable")
var enable bool
if enableRaw != "" {
var err error
enable, err = strconv.ParseBool(enableRaw)
if err != nil {
return nil, CodedError(400, "invalid enable value")
}

// Use the force drain to have it keep the same behavior as old clients.
if enable {
drainRequest.DrainSpec = &api.DrainSpec{
Deadline: -1 * time.Second,
}
} else {
// If drain is disabled on an old client, mark the node as eligible for backwards compatibility
drainRequest.MarkEligible = true
}
} else {
if err := decodeBody(req, &drainRequest); err != nil {
return nil, CodedError(400, err.Error())
}
if err := decodeBody(req, &drainRequest); err != nil {
return nil, CodedError(400, err.Error())
}

args := structs.NodeUpdateDrainRequest{
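With the query-parameter path removed, a drain can only be requested with a JSON body. Below is a minimal sketch (not part of this PR) of a modern drain request using the api package; the node ID and deadline are placeholders, and both client and server are assumed to be 0.8 or newer:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Connect to the local agent (default address http://127.0.0.1:4646).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Post-0.8 drains carry a DrainSpec in the request body; the
	// ?enable=true query parameter deleted above is no longer accepted.
	spec := &api.DrainSpec{
		Deadline: 10 * time.Minute, // placeholder: force-stop allocs after this
	}
	if _, err := client.Nodes().UpdateDrain("node-id-placeholder", spec, false, nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println("drain update submitted")
}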
57 changes: 0 additions & 57 deletions command/agent/node_endpoint_test.go
@@ -280,7 +280,6 @@ func TestHTTP_NodeDrain(t *testing.T) {
state := s.Agent.server.State()
out, err := state.NodeByID(nil, node.ID)
require.Nil(err)
require.True(out.Drain)
require.NotNil(out.DrainStrategy)
require.Equal(10*time.Second, out.DrainStrategy.Deadline)

@@ -297,66 +296,10 @@ func TestHTTP_NodeDrain(t *testing.T) {

out, err = state.NodeByID(nil, node.ID)
require.Nil(err)
require.False(out.Drain)
require.Nil(out.DrainStrategy)
})
}

// Tests backwards compatibility code to support pre 0.8 clients
func TestHTTP_NodeDrain_Compat(t *testing.T) {
t.Parallel()
require := require.New(t)
httpTest(t, nil, func(s *TestAgent) {
// Create the node
node := mock.Node()
args := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp structs.NodeUpdateResponse
require.Nil(s.Agent.RPC("Node.Register", &args, &resp))

// Make the HTTP request
req, err := http.NewRequest("POST", "/v1/node/"+node.ID+"/drain?enable=true", nil)
require.Nil(err)
respW := httptest.NewRecorder()

// Make the request
obj, err := s.Server.NodeSpecificRequest(respW, req)
require.Nil(err)

// Check for the index
require.NotZero(respW.HeaderMap.Get("X-Nomad-Index"))

// Check the response
_, ok := obj.(structs.NodeDrainUpdateResponse)
require.True(ok)

// Check that the node has been updated
state := s.Agent.server.State()
out, err := state.NodeByID(nil, node.ID)
require.Nil(err)
require.True(out.Drain)
require.NotNil(out.DrainStrategy)
require.Equal(-1*time.Second, out.DrainStrategy.Deadline)

// Make the HTTP request to unset drain
req, err = http.NewRequest("POST", "/v1/node/"+node.ID+"/drain?enable=false", nil)
require.Nil(err)
respW = httptest.NewRecorder()

// Make the request
_, err = s.Server.NodeSpecificRequest(respW, req)
require.Nil(err)

out, err = state.NodeByID(nil, node.ID)
require.Nil(err)
require.False(out.Drain)
require.Nil(out.DrainStrategy)
require.Equal(structs.NodeSchedulingEligible, out.SchedulingEligibility)
})
}

func TestHTTP_NodeEligible(t *testing.T) {
t.Parallel()
require := require.New(t)
2 changes: 1 addition & 1 deletion command/alloc_status.go
@@ -235,7 +235,7 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
fmt.Sprintf("Node Name|%s", alloc.NodeName),
fmt.Sprintf("Job ID|%s", alloc.JobID),
fmt.Sprintf("Job Version|%d", getVersion(alloc.Job)),
fmt.Sprintf("Job Version|%d", alloc.Job.Version),
fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),
26 changes: 0 additions & 26 deletions command/helpers.go
@@ -438,32 +438,6 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
return jobStruct, nil
}

// COMPAT: Remove in 0.7.0
// Nomad 0.6.0 introduced the submit time field, so CLIs interacting with
// older versions of Nomad would SEGFAULT as reported here:
// https://github.com/hashicorp/nomad/issues/2918
// getSubmitTime returns the job's submit time converted to time.Time.
func getSubmitTime(job *api.Job) time.Time {
if job.SubmitTime != nil {
return time.Unix(0, *job.SubmitTime)
}

return time.Time{}
}

// COMPAT: Remove in 0.7.0
// Nomad 0.6.0 introduced job Versions, so CLIs interacting with
// older versions of Nomad would SEGFAULT as reported here:
// https://github.com/hashicorp/nomad/issues/2918
// getVersion safely returns the version of the job.
func getVersion(job *api.Job) uint64 {
if job.Version != nil {
return *job.Version
}

return 0
}

// mergeAutocompleteFlags is used to join multiple flag completion sets.
func mergeAutocompleteFlags(flags ...complete.Flags) complete.Flags {
merged := make(map[string]complete.Predictor, len(flags))
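With these helpers deleted, callers dereference the pointer fields directly, as the alloc_status.go and job_status.go hunks show. A minimal sketch of that pattern, with defensive nil guards kept for illustration; the guards mirror the removed helpers and are an assumption, not code from this PR (0.6+ servers always populate both fields):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
)

// describeJob demonstrates the direct access that replaces
// getSubmitTime/getVersion.
func describeJob(job *api.Job) string {
	var submitted time.Time
	if job.SubmitTime != nil {
		// SubmitTime is unix time in nanoseconds.
		submitted = time.Unix(0, *job.SubmitTime)
	}
	var version uint64
	if job.Version != nil {
		version = *job.Version
	}
	return fmt.Sprintf("version %d, submitted %s", version, submitted.Format(time.RFC3339))
}

func main() {
	v := uint64(3)
	ts := time.Now().UnixNano()
	fmt.Println(describeJob(&api.Job{Version: &v, SubmitTime: &ts}))
}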
6 changes: 3 additions & 3 deletions command/job_status.go
@@ -169,7 +169,7 @@ func (c *JobStatusCommand) Run(args []string) int {
basic := []string{
fmt.Sprintf("ID|%s", *job.ID),
fmt.Sprintf("Name|%s", *job.Name),
fmt.Sprintf("Submit Date|%s", formatTime(getSubmitTime(job))),
fmt.Sprintf("Submit Date|%s", formatTime(time.Unix(0, *job.SubmitTime))),
Review comment (Contributor):

I like the utility function here, as it's not obvious that job.SubmitTime is in nanoseconds.
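A hypothetical helper in the spirit of that comment (the name and nil guard are illustrative, not part of this PR; formatTime is the command package's existing formatter):

// formatSubmitTime is illustrative only: it keeps the nanosecond
// conversion in one place so call sites don't repeat time.Unix(0, ...).
func formatSubmitTime(job *api.Job) string {
	if job.SubmitTime == nil {
		return ""
	}
	// job.SubmitTime is unix time in nanoseconds.
	return formatTime(time.Unix(0, *job.SubmitTime))
}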

fmt.Sprintf("Type|%s", *job.Type),
fmt.Sprintf("Priority|%d", *job.Priority),
fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")),
@@ -462,7 +462,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
limit(alloc.EvalID, uuidLength),
limit(alloc.NodeID, uuidLength),
alloc.TaskGroup,
getVersion(alloc.Job),
alloc.Job.Version,
alloc.DesiredStatus,
alloc.ClientStatus,
formatUnixNanoTime(alloc.CreateTime),
@@ -478,7 +478,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
limit(alloc.ID, uuidLength),
limit(alloc.NodeID, uuidLength),
alloc.TaskGroup,
getVersion(alloc.Job),
alloc.Job.Version,
alloc.DesiredStatus,
alloc.ClientStatus,
createTimePretty,
54 changes: 0 additions & 54 deletions nomad/fsm.go
@@ -1135,11 +1135,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if eval.Namespace == "" {
eval.Namespace = structs.DefaultNamespace
}

if err := restore.EvalRestore(eval); err != nil {
return err
}
@@ -1150,11 +1145,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if alloc.Namespace == "" {
alloc.Namespace = structs.DefaultNamespace
}

if err := restore.AllocRestore(alloc); err != nil {
return err
}
@@ -1174,11 +1164,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if launch.Namespace == "" {
launch.Namespace = structs.DefaultNamespace
}

if err := restore.PeriodicLaunchRestore(launch); err != nil {
return err
}
@@ -1189,11 +1174,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if summary.Namespace == "" {
summary.Namespace = structs.DefaultNamespace
}

if err := restore.JobSummaryRestore(summary); err != nil {
return err
}
@@ -1213,11 +1193,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if version.Namespace == "" {
version.Namespace = structs.DefaultNamespace
}

if err := restore.JobVersionRestore(version); err != nil {
return err
}
@@ -1228,11 +1203,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
return err
}

// COMPAT: Handle upgrade to v0.7.0
if deployment.Namespace == "" {
deployment.Namespace = structs.DefaultNamespace
}

if err := restore.DeploymentRestore(deployment); err != nil {
return err
}
@@ -1280,30 +1250,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {

restore.Commit()

// Create Job Summaries
// COMPAT 0.4 -> 0.4.1
// We can remove this in 0.5. This exists so that the server creates job
// summaries if they were not present previously. When users upgrade to 0.5
// from 0.4.1, the snapshot will contain job summaries so it will be safe to
// remove this block.
index, err := newState.Index("job_summary")
if err != nil {
return fmt.Errorf("couldn't fetch index of job summary table: %v", err)
}

// If the index is 0 that means there is no job summary in the snapshot so
// we will have to create them
if index == 0 {
// query the latest index
latestIndex, err := newState.LatestIndex()
if err != nil {
return fmt.Errorf("unable to query latest index: %v", index)
}
if err := newState.ReconcileJobSummaries(latestIndex); err != nil {
return fmt.Errorf("error reconciling summaries: %v", err)
}
}

// COMPAT Remove in 0.10
// Clean up active deployments that do not have a job
if err := n.failLeakedDeployments(newState); err != nil {
78 changes: 0 additions & 78 deletions nomad/fsm_test.go
@@ -350,7 +350,6 @@ func TestFSM_BatchUpdateNodeDrain(t *testing.T) {
ws := memdb.NewWatchSet()
node, err = fsm.State().NodeByID(ws, req.Node.ID)
require.Nil(err)
require.True(node.Drain)
require.Equal(node.DrainStrategy, strategy)
require.Len(node.Events, 2)
}
@@ -394,46 +393,10 @@ func TestFSM_UpdateNodeDrain(t *testing.T) {
ws := memdb.NewWatchSet()
node, err = fsm.State().NodeByID(ws, req.Node.ID)
require.Nil(err)
require.True(node.Drain)
require.Equal(node.DrainStrategy, strategy)
require.Len(node.Events, 2)
}

func TestFSM_UpdateNodeDrain_Pre08_Compatibility(t *testing.T) {
t.Parallel()
require := require.New(t)
fsm := testFSM(t)

// Force a node into the state store without eligibility
node := mock.Node()
node.SchedulingEligibility = ""
require.Nil(fsm.State().UpsertNode(1, node))

// Do an old style drain
req := structs.NodeUpdateDrainRequest{
NodeID: node.ID,
Drain: true,
}
buf, err := structs.Encode(structs.NodeUpdateDrainRequestType, req)
require.Nil(err)

resp := fsm.Apply(makeLog(buf))
require.Nil(resp)

// Verify we have upgraded to a force drain
ws := memdb.NewWatchSet()
node, err = fsm.State().NodeByID(ws, req.NodeID)
require.Nil(err)
require.True(node.Drain)

expected := &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: -1 * time.Second,
},
}
require.Equal(expected, node.DrainStrategy)
}

func TestFSM_UpdateNodeEligibility(t *testing.T) {
t.Parallel()
require := require.New(t)
@@ -2701,47 +2664,6 @@ func TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) {

}

func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()

// make an allocation
alloc := mock.Alloc()
state.UpsertJob(1010, alloc.Job)
state.UpsertAllocs(1011, []*structs.Allocation{alloc})

// Delete the summary
state.DeleteJobSummary(1040, alloc.Namespace, alloc.Job.ID)

// Delete the index
if err := state.RemoveIndex("job_summary"); err != nil {
t.Fatalf("err: %v", err)
}

fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
latestIndex, _ := state.LatestIndex()

ws := memdb.NewWatchSet()
out, _ := state2.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
expected := structs.JobSummary{
JobID: alloc.Job.ID,
Namespace: alloc.Job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {
Starting: 1,
},
},
CreateIndex: 1010,
ModifyIndex: latestIndex,
}
if !reflect.DeepEqual(&expected, out) {
t.Fatalf("expected: %#v, actual: %#v", &expected, out)
}
}

func TestFSM_ReconcileSummaries(t *testing.T) {
t.Parallel()
// Add some state