From f551dcb83e3ac144c9dbb90583b6e82d234662e9 Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Mon, 6 Feb 2017 11:48:28 -0800 Subject: [PATCH 01/16] Making the job spec return api.Job --- api/allocations_test.go | 15 ++- api/compose_test.go | 32 ++--- api/jobs.go | 166 +++++++++++++++++++---- api/jobs_test.go | 66 ++++++--- api/resources.go | 41 +++++- api/tasks.go | 166 ++++++++++++++++++++--- api/tasks_test.go | 14 +- api/util_test.go | 24 ++-- command/agent/http.go | 2 + command/agent/job_endpoint.go | 243 ++++++++++++++++++++++++++++++++++ command/alloc_status.go | 6 +- command/helpers.go | 3 +- command/logs.go | 2 +- command/node_status.go | 23 ++-- command/plan.go | 29 +--- command/run.go | 63 +++------ command/status.go | 10 +- command/stop.go | 4 +- command/validate.go | 17 ++- helper/funcs.go | 25 +++- jobspec/parse.go | 148 ++++++++++++--------- jobspec/parse_test.go | 11 +- nomad/job_endpoint.go | 19 +++ nomad/structs/structs.go | 17 +++ 24 files changed, 879 insertions(+), 267 deletions(-) diff --git a/api/allocations_test.go b/api/allocations_test.go index 597000472fc2..22e5c1c9b940 100644 --- a/api/allocations_test.go +++ b/api/allocations_test.go @@ -4,6 +4,8 @@ import ( "reflect" "sort" "testing" + + "github.com/hashicorp/nomad/helper" ) func TestAllocations_List(t *testing.T) { @@ -28,9 +30,9 @@ func TestAllocations_List(t *testing.T) { return job := &Job{ - ID: "job1", - Name: "Job #1", - Type: JobTypeService, + ID: helper.StringToPtr("job1"), + Name: helper.StringToPtr("Job #1"), + Type: helper.StringToPtr(JobTypeService), } eval, _, err := c.Jobs().Register(job, nil) if err != nil { @@ -74,10 +76,11 @@ func TestAllocations_PrefixList(t *testing.T) { return job := &Job{ - ID: "job1", - Name: "Job #1", - Type: JobTypeService, + ID: helper.StringToPtr("job1"), + Name: helper.StringToPtr("Job #1"), + Type: helper.StringToPtr(JobTypeService), } + eval, _, err := c.Jobs().Register(job, nil) if err != nil { t.Fatalf("err: %s", err) diff --git 
a/api/compose_test.go b/api/compose_test.go index 5f3ca68a534e..2933996ef8fa 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -3,6 +3,8 @@ package api import ( "reflect" "testing" + + "github.com/hashicorp/nomad/helper" ) func TestCompose(t *testing.T) { @@ -12,10 +14,10 @@ func TestCompose(t *testing.T) { SetMeta("foo", "bar"). Constrain(NewConstraint("kernel.name", "=", "linux")). Require(&Resources{ - CPU: 1250, - MemoryMB: 1024, - DiskMB: 2048, - IOPS: 500, + CPU: helper.IntToPtr(1250), + MemoryMB: helper.IntToPtr(1024), + DiskMB: helper.IntToPtr(2048), + IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", @@ -40,11 +42,11 @@ func TestCompose(t *testing.T) { // Check that the composed result looks correct expect := &Job{ - Region: "region1", - ID: "job1", - Name: "myjob", - Type: JobTypeService, - Priority: 2, + Region: helper.StringToPtr("region1"), + ID: helper.StringToPtr("job1"), + Name: helper.StringToPtr("myjob"), + Type: helper.StringToPtr(JobTypeService), + Priority: helper.IntToPtr(2), Datacenters: []string{ "dc1", }, @@ -60,8 +62,8 @@ func TestCompose(t *testing.T) { }, TaskGroups: []*TaskGroup{ &TaskGroup{ - Name: "grp1", - Count: 2, + Name: helper.StringToPtr("grp1"), + Count: helper.IntToPtr(2), Constraints: []*Constraint{ &Constraint{ LTarget: "kernel.name", @@ -74,10 +76,10 @@ func TestCompose(t *testing.T) { Name: "task1", Driver: "exec", Resources: &Resources{ - CPU: 1250, - MemoryMB: 1024, - DiskMB: 2048, - IOPS: 500, + CPU: helper.IntToPtr(1250), + MemoryMB: helper.IntToPtr(1024), + DiskMB: helper.IntToPtr(2048), + IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", diff --git a/api/jobs.go b/api/jobs.go index edbce83e1eca..c70101d04aba 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -6,6 +6,9 @@ import ( "sort" "strconv" "time" + + "github.com/gorhill/cronexpr" + "github.com/hashicorp/nomad/helper" ) const ( @@ -14,6 +17,9 @@ const ( // 
JobTypeBatch indicates a short-lived process JobTypeBatch = "batch" + + // PeriodicSpecCron is used for a cron spec. + PeriodicSpecCron = "cron" ) const ( @@ -32,6 +38,16 @@ func (c *Client) Jobs() *Jobs { return &Jobs{client: c} } +func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) { + var resp JobValidateResponse + req := &JobValidateRequest{Job: job} + if q != nil { + req.WriteRequest = WriteRequest{Region: q.Region} + } + wm, err := j.client.write("/v1/validate/job", req, &resp, q) + return &resp, wm, err +} + // Register is used to register a new job. It returns the ID // of the evaluation, along with any errors encountered. func (j *Jobs) Register(job *Job, q *WriteOptions) (string, *WriteMeta, error) { @@ -162,7 +178,7 @@ func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *Wr Job: job, Diff: diff, } - wm, err := j.client.write("/v1/job/"+job.ID+"/plan", req, &resp, q) + wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q) if err != nil { return nil, nil, err } @@ -207,10 +223,36 @@ type UpdateStrategy struct { // PeriodicConfig is for serializing periodic config for a job. type PeriodicConfig struct { - Enabled bool - Spec string - SpecType string - ProhibitOverlap bool + Enabled *bool + Spec *string + SpecType *string + ProhibitOverlap *bool +} + +func (p *PeriodicConfig) Canonicalize() { + if p.Enabled == nil { + p.Enabled = helper.BoolToPtr(true) + } + if p.SpecType == nil { + p.SpecType = helper.StringToPtr(PeriodicSpecCron) + } + if p.ProhibitOverlap == nil { + p.ProhibitOverlap = helper.BoolToPtr(false) + } +} + +// Next returns the closest time instant matching the spec that is after the +// passed time. If no matching instance exists, the zero value of time.Time is +// returned. The `time.Location` of the returned value matches that of the +// passed time. 
+func (p *PeriodicConfig) Next(fromTime time.Time) time.Time { + if *p.SpecType == PeriodicSpecCron { + if e, err := cronexpr.Parse(*p.Spec); err == nil { + return e.Next(fromTime) + } + } + + return time.Time{} } // ParameterizedJobConfig is used to configure the parameterized job. @@ -222,13 +264,13 @@ type ParameterizedJobConfig struct { // Job is used to serialize a job. type Job struct { - Region string - ID string - ParentID string - Name string - Type string - Priority int - AllAtOnce bool + Region *string + ID *string + ParentID *string + Name *string + Type *string + Priority *int + AllAtOnce *bool Datacenters []string Constraints []*Constraint TaskGroups []*TaskGroup @@ -237,12 +279,69 @@ type Job struct { ParameterizedJob *ParameterizedJobConfig Payload []byte Meta map[string]string - VaultToken string - Status string - StatusDescription string - CreateIndex uint64 - ModifyIndex uint64 - JobModifyIndex uint64 + VaultToken *string + Status *string + StatusDescription *string + CreateIndex *uint64 + ModifyIndex *uint64 + JobModifyIndex *uint64 +} + +// IsPeriodic returns whether a job is periodic. +func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsParameterized returns whether a job is parameterized job. 
+func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil +} + +func (j *Job) Canonicalize() { + if j.ID == nil { + j.ID = helper.StringToPtr("") + } + if j.Name == nil { + j.Name = j.ID + } + + if j.Priority == nil { + j.Priority = helper.IntToPtr(50) + } + if j.Region == nil { + j.Region = helper.StringToPtr("global") + } + if j.Type == nil { + j.Type = helper.StringToPtr("service") + } + if j.AllAtOnce == nil { + j.AllAtOnce = helper.BoolToPtr(false) + } + if j.VaultToken == nil { + j.VaultToken = helper.StringToPtr("") + } + if j.Status == nil { + j.Status = helper.StringToPtr("") + } + if j.StatusDescription == nil { + j.StatusDescription = helper.StringToPtr("") + } + if j.CreateIndex == nil { + j.CreateIndex = helper.Uint64ToPtr(0) + } + if j.ModifyIndex == nil { + j.ModifyIndex = helper.Uint64ToPtr(0) + } + if j.JobModifyIndex == nil { + j.JobModifyIndex = helper.Uint64ToPtr(0) + } + if j.Periodic != nil { + j.Periodic.Canonicalize() + } + + for _, tg := range j.TaskGroups { + tg.Canonicalize(*j.Type) + } } // JobSummary summarizes the state of the allocations of a job @@ -330,11 +429,11 @@ func NewBatchJob(id, name, region string, pri int) *Job { // newJob is used to create a new Job struct. 
func newJob(id, name, region, typ string, pri int) *Job { return &Job{ - Region: region, - ID: id, - Name: name, - Type: typ, - Priority: pri, + Region: ®ion, + ID: &id, + Name: &name, + Type: &typ, + Priority: &pri, } } @@ -371,6 +470,27 @@ func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job { return j } +type WriteRequest struct { + // The target region for this write + Region string +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string +} + // RegisterJobRequest is used to serialize a job registration type RegisterJobRequest struct { Job *Job diff --git a/api/jobs_test.go b/api/jobs_test.go index c0d4291095a6..38277aa02b66 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" ) @@ -45,11 +46,38 @@ func TestJobs_Register(t *testing.T) { assertQueryMeta(t, qm) // Check that we got the expected response - if len(resp) != 1 || resp[0].ID != job.ID { + if len(resp) != 1 || resp[0].ID != *job.ID { t.Fatalf("bad: %#v", resp[0]) } } +func TestJobs_Validate(t *testing.T) { + c, s := makeClient(t, nil, nil) + defer s.Stop() + jobs := c.Jobs() + + // Create a job and attempt to register it + job := testJob() + resp, _, err := jobs.Validate(job, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + if len(resp.ValidationErrors) != 0 { + t.Fatalf("bad %v", resp) + } + + job.ID = nil + resp1, _, err := jobs.Validate(job, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp1.ValidationErrors) == 0 { + t.Fatalf("bad %v", resp1) + } +} + func TestJobs_EnforceRegister(t 
*testing.T) { c, s := makeClient(t, nil, nil) defer s.Stop() @@ -96,7 +124,7 @@ func TestJobs_EnforceRegister(t *testing.T) { t.Fatalf("bad length: %d", len(resp)) } - if resp[0].ID != job.ID { + if resp[0].ID != *job.ID { t.Fatalf("bad: %#v", resp[0]) } curIndex := resp[0].JobModifyIndex @@ -178,13 +206,13 @@ func TestJobs_PrefixList(t *testing.T) { // Query the job again and ensure it exists // Listing when nothing exists returns empty - results, qm, err = jobs.PrefixList(job.ID[:1]) + results, qm, err = jobs.PrefixList((*job.ID)[:1]) if err != nil { t.Fatalf("err: %s", err) } // Check if we have the right list - if len(results) != 1 || results[0].ID != job.ID { + if len(results) != 1 || results[0].ID != *job.ID { t.Fatalf("bad: %#v", results) } } @@ -222,7 +250,7 @@ func TestJobs_List(t *testing.T) { } // Check if we have the right list - if len(results) != 1 || results[0].ID != job.ID { + if len(results) != 1 || results[0].ID != *job.ID { t.Fatalf("bad: %#v", results) } } @@ -387,7 +415,7 @@ func TestJobs_PeriodicForce(t *testing.T) { } testutil.WaitForResult(func() (bool, error) { - out, _, err := jobs.Info(job.ID, nil) + out, _, err := jobs.Info(*job.ID, nil) if err != nil || out == nil || out.ID != job.ID { return false, err } @@ -397,7 +425,7 @@ func TestJobs_PeriodicForce(t *testing.T) { }) // Try force again - evalID, wm, err := jobs.PeriodicForce(job.ID, nil) + evalID, wm, err := jobs.PeriodicForce(*job.ID, nil) if err != nil { t.Fatalf("err: %s", err) } @@ -519,10 +547,10 @@ func TestJobs_JobSummary(t *testing.T) { assertQueryMeta(t, qm) // Check that the result is what we expect - if job.ID != result.JobID { + if *job.ID != result.JobID { t.Fatalf("err: expected job id of %s saw %s", job.ID, result.JobID) } - if _, ok := result.Summary[taskName]; !ok { + if _, ok := result.Summary[*taskName]; !ok { t.Fatalf("err: unable to find %s key in job summary", taskName) } } @@ -530,11 +558,11 @@ func TestJobs_JobSummary(t *testing.T) { func 
TestJobs_NewBatchJob(t *testing.T) { job := NewBatchJob("job1", "myjob", "region1", 5) expect := &Job{ - Region: "region1", - ID: "job1", - Name: "myjob", - Type: JobTypeBatch, - Priority: 5, + Region: helper.StringToPtr("region1"), + ID: helper.StringToPtr("job1"), + Name: helper.StringToPtr("myjob"), + Type: helper.StringToPtr(JobTypeBatch), + Priority: helper.IntToPtr(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -544,11 +572,11 @@ func TestJobs_NewBatchJob(t *testing.T) { func TestJobs_NewServiceJob(t *testing.T) { job := NewServiceJob("job1", "myjob", "region1", 5) expect := &Job{ - Region: "region1", - ID: "job1", - Name: "myjob", - Type: JobTypeService, - Priority: 5, + Region: helper.StringToPtr("region1"), + ID: helper.StringToPtr("job1"), + Name: helper.StringToPtr("myjob"), + Type: helper.StringToPtr(JobTypeService), + Priority: helper.IntToPtr(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) diff --git a/api/resources.go b/api/resources.go index 8cb2505ba32b..10e8f5508921 100644 --- a/api/resources.go +++ b/api/resources.go @@ -1,15 +1,48 @@ package api +import "github.com/hashicorp/nomad/helper" + // Resources encapsulates the required resources of // a given task or task group. type Resources struct { - CPU int - MemoryMB int - DiskMB int - IOPS int + CPU *int + MemoryMB *int + DiskMB *int + IOPS *int Networks []*NetworkResource } +func MinResources() *Resources { + return &Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + } + +} + +// Merge merges this resource with another resource. 
+func (r *Resources) Merge(other *Resources) { + if other == nil { + return + } + if other.CPU != nil { + r.CPU = other.CPU + } + if other.MemoryMB != nil { + r.MemoryMB = other.MemoryMB + } + if other.DiskMB != nil { + r.DiskMB = other.DiskMB + } + if other.IOPS != nil { + r.IOPS = other.IOPS + } + if len(other.Networks) != 0 { + r.Networks = other.Networks + } +} + type Port struct { Label string Value int diff --git a/api/tasks.go b/api/tasks.go index d64bc7bb247e..104d6ccb1d89 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -2,6 +2,8 @@ package api import ( "time" + + "github.com/hashicorp/nomad/helper" ) // MemoryStats holds memory usage related stats @@ -84,15 +86,35 @@ type Service struct { // EphemeralDisk is an ephemeral disk object type EphemeralDisk struct { - Sticky bool - Migrate bool - SizeMB int `mapstructure:"size"` + Sticky *bool + Migrate *bool + SizeMB *int `mapstructure:"size"` +} + +func DefaultEphemeralDisk() *EphemeralDisk { + return &EphemeralDisk{ + Sticky: helper.BoolToPtr(false), + Migrate: helper.BoolToPtr(false), + SizeMB: helper.IntToPtr(300), + } +} + +func (e *EphemeralDisk) Canonicalize() { + if e.Sticky == nil { + e.Sticky = helper.BoolToPtr(false) + } + if e.Migrate == nil { + e.Migrate = helper.BoolToPtr(false) + } + if e.SizeMB == nil { + e.SizeMB = helper.IntToPtr(300) + } } // TaskGroup is the unit of scheduling. type TaskGroup struct { - Name string - Count int + Name *string + Count *int Constraints []*Constraint Tasks []*Task RestartPolicy *RestartPolicy @@ -103,8 +125,43 @@ type TaskGroup struct { // NewTaskGroup creates a new TaskGroup. 
func NewTaskGroup(name string, count int) *TaskGroup { return &TaskGroup{ - Name: name, - Count: count, + Name: helper.StringToPtr(name), + Count: helper.IntToPtr(count), + } +} + +func (g *TaskGroup) Canonicalize(jobType string) { + if g.Name == nil { + g.Name = helper.StringToPtr("") + } + if g.Count == nil { + g.Count = helper.IntToPtr(1) + } + for _, t := range g.Tasks { + t.Canonicalize() + } + if g.EphemeralDisk == nil { + g.EphemeralDisk = DefaultEphemeralDisk() + } else { + g.EphemeralDisk.Canonicalize() + } + if g.RestartPolicy == nil { + switch jobType { + case "service", "system": + g.RestartPolicy = &RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 2, + Interval: 1 * time.Minute, + Mode: "delay", + } + default: + g.RestartPolicy = &RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 15, + Interval: 7 * 24 * time.Hour, + Mode: "delay", + } + } } } @@ -137,8 +194,24 @@ func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup { // LogConfig provides configuration for log rotation type LogConfig struct { - MaxFiles int - MaxFileSizeMB int + MaxFiles *int + MaxFileSizeMB *int +} + +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + MaxFiles: helper.IntToPtr(10), + MaxFileSizeMB: helper.IntToPtr(10), + } +} + +func (l *LogConfig) Canonicalize() { + if l.MaxFiles == nil { + l.MaxFiles = helper.IntToPtr(10) + } + if l.MaxFileSizeMB == nil { + l.MaxFileSizeMB = helper.IntToPtr(10) + } } // DispatchPayloadConfig configures how a task gets its input from a job dispatch @@ -166,28 +239,79 @@ type Task struct { Leader *bool } +func (t *Task) Canonicalize() { + if t.LogConfig == nil { + t.LogConfig = DefaultLogConfig() + } else { + t.LogConfig.Canonicalize() + } + if t.Vault != nil { + t.Vault.Canonicalize() + } + for _, artifact := range t.Artifacts { + artifact.Canonicalize() + } + for _, tmpl := range t.Templates { + tmpl.Canonicalize() + } + + min := MinResources() + min.Merge(t.Resources) + t.Resources = min +} + // TaskArtifact is used to 
download artifacts before running a task. type TaskArtifact struct { - GetterSource string + GetterSource *string GetterOptions map[string]string - RelativeDest string + RelativeDest *string +} + +func (a *TaskArtifact) Canonicalize() { + if a.RelativeDest == nil { + a.RelativeDest = helper.StringToPtr("local/") + } } type Template struct { - SourcePath string - DestPath string - EmbeddedTmpl string - ChangeMode string - ChangeSignal string - Splay time.Duration - Perms string + SourcePath *string + DestPath *string + EmbeddedTmpl *string + ChangeMode *string + ChangeSignal *string + Splay *time.Duration + Perms *string +} + +func (tmpl *Template) Canonicalize() { + if tmpl.ChangeMode == nil { + tmpl.ChangeMode = helper.StringToPtr("restart") + } + if tmpl.Splay == nil { + tmpl.Splay = helper.TimeToPtr(5 * time.Second) + } + if tmpl.Perms == nil { + tmpl.Perms = helper.StringToPtr("0644") + } } type Vault struct { Policies []string - Env bool - ChangeMode string - ChangeSignal string + Env *bool + ChangeMode *string + ChangeSignal *string +} + +func (v *Vault) Canonicalize() { + if v.Env == nil { + v.Env = helper.BoolToPtr(true) + } + if v.ChangeMode == nil { + v.ChangeMode = helper.StringToPtr("restart") + } + if v.ChangeSignal == nil { + v.ChangeSignal = helper.StringToPtr("sighup") + } } // NewTask creates and initializes a new Task. 
diff --git a/api/tasks_test.go b/api/tasks_test.go index bbdf1255065e..d0c097ecb358 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -3,13 +3,15 @@ package api import ( "reflect" "testing" + + "github.com/hashicorp/nomad/helper" ) func TestTaskGroup_NewTaskGroup(t *testing.T) { grp := NewTaskGroup("grp1", 2) expect := &TaskGroup{ - Name: "grp1", - Count: 2, + Name: helper.StringToPtr("grp1"), + Count: helper.IntToPtr(2), } if !reflect.DeepEqual(grp, expect) { t.Fatalf("expect: %#v, got: %#v", expect, grp) @@ -162,10 +164,10 @@ func TestTask_Require(t *testing.T) { // Create some require resources resources := &Resources{ - CPU: 1250, - MemoryMB: 128, - DiskMB: 2048, - IOPS: 500, + CPU: helper.IntToPtr(1250), + MemoryMB: helper.IntToPtr(128), + DiskMB: helper.IntToPtr(2048), + IOPS: helper.IntToPtr(500), Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", diff --git a/api/util_test.go b/api/util_test.go index aaca7487e92e..bf1c0556cce5 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -1,6 +1,10 @@ package api -import "testing" +import ( + "testing" + + "github.com/hashicorp/nomad/helper" +) func assertQueryMeta(t *testing.T, qm *QueryMeta) { if qm.LastIndex == 0 { @@ -21,19 +25,19 @@ func testJob() *Job { task := NewTask("task1", "exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: 100, - MemoryMB: 256, - IOPS: 10, + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(256), + IOPS: helper.IntToPtr(10), }). SetLogConfig(&LogConfig{ - MaxFiles: 1, - MaxFileSizeMB: 2, + MaxFiles: helper.IntToPtr(1), + MaxFileSizeMB: helper.IntToPtr(2), }) group := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: 25, + SizeMB: helper.IntToPtr(25), }) job := NewBatchJob("job1", "redis", "region1", 1). 
@@ -45,9 +49,9 @@ func testJob() *Job { func testPeriodicJob() *Job { job := testJob().AddPeriodicConfig(&PeriodicConfig{ - Enabled: true, - Spec: "*/30 * * * *", - SpecType: "cron", + Enabled: helper.BoolToPtr(true), + Spec: helper.StringToPtr("*/30 * * * *"), + SpecType: helper.StringToPtr("cron"), }) return job } diff --git a/command/agent/http.go b/command/agent/http.go index e111baea84a0..68b65075753e 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -165,6 +165,8 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/agent/servers", s.wrap(s.AgentServersRequest)) s.mux.HandleFunc("/v1/agent/keyring/", s.wrap(s.KeyringOperationRequest)) + s.mux.HandleFunc("/v1/validate/job", s.wrap(s.ValidateJobRequest)) + s.mux.HandleFunc("/v1/regions", s.wrap(s.RegionListRequest)) s.mux.HandleFunc("/v1/status/leader", s.wrap(s.StatusLeaderRequest)) diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index 8035ca71f2cb..ed98828fd67e 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -6,6 +6,8 @@ import ( "strings" "github.com/golang/snappy" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/nomad/structs" ) @@ -111,6 +113,48 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request, return out, nil } +func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Ensure request method is POST or PUT + if !(req.Method == "POST" || req.Method == "PUT") { + return nil, CodedError(405, ErrInvalidMethod) + } + + var validateRequest api.JobValidateRequest + if err := decodeBody(req, &validateRequest); err != nil { + return nil, CodedError(400, err.Error()) + } + if validateRequest.Job == nil { + return nil, CodedError(400, "Job must be specified") + } + + job := s.apiJobToStructJob(validateRequest.Job) + args := structs.JobValidateRequest{ + Job: job, + 
WriteRequest: structs.WriteRequest{ + Region: validateRequest.Region, + }, + } + s.parseRegion(req, &args.Region) + + var out structs.JobValidateResponse + if err := s.agent.RPC("Job.Validate", &args, &out); err != nil { + + // Fall back to do local validation + args.Job.Canonicalize() + if vErr := args.Job.Validate(); vErr != nil { + if merr, ok := err.(*multierror.Error); ok { + for _, err := range merr.Errors { + out.ValidationErrors = append(out.ValidationErrors, err.Error()) + } + } + } else { + out.ValidationErrors = append(out.ValidationErrors, vErr.Error()) + } + } + + return out, nil +} + func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request, jobName string) (interface{}, error) { if req.Method != "PUT" && req.Method != "POST" { @@ -310,3 +354,202 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ setIndex(resp, out.Index) return out, nil } + +func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job { + job.Canonicalize() + + j := &structs.Job{ + Region: *job.Region, + ID: *job.ID, + ParentID: *job.ID, + Name: *job.Name, + Type: *job.Type, + Priority: *job.Priority, + AllAtOnce: *job.AllAtOnce, + Datacenters: job.Datacenters, + Payload: job.Payload, + Meta: job.Meta, + VaultToken: *job.VaultToken, + Status: *job.Status, + StatusDescription: *job.StatusDescription, + CreateIndex: *job.CreateIndex, + ModifyIndex: *job.ModifyIndex, + JobModifyIndex: *job.ModifyIndex, + } + + j.Constraints = make([]*structs.Constraint, len(job.Constraints)) + for i, c := range job.Constraints { + j.Constraints[i] = &structs.Constraint{ + LTarget: c.LTarget, + RTarget: c.RTarget, + Operand: c.Operand, + } + } + if job.Update != nil { + j.Update = structs.UpdateStrategy{ + Stagger: job.Update.Stagger, + MaxParallel: job.Update.MaxParallel, + } + } + if job.Periodic != nil { + j.Periodic = &structs.PeriodicConfig{ + Enabled: j.Periodic.Enabled, + Spec: j.Periodic.Spec, + SpecType: j.Periodic.SpecType, + 
ProhibitOverlap: j.Periodic.ProhibitOverlap, + } + } + if job.ParameterizedJob != nil { + j.ParameterizedJob = &structs.ParameterizedJobConfig{ + Payload: job.ParameterizedJob.Payload, + MetaRequired: job.ParameterizedJob.MetaRequired, + MetaOptional: job.ParameterizedJob.MetaOptional, + } + } + + j.TaskGroups = make([]*structs.TaskGroup, len(job.TaskGroups)) + for i, taskGroup := range job.TaskGroups { + tg := &structs.TaskGroup{} + s.apiTgToStructsTG(taskGroup, tg) + j.TaskGroups[i] = tg + } + + return j +} + +func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { + tg.Name = *taskGroup.Name + tg.Count = *taskGroup.Count + tg.Meta = taskGroup.Meta + tg.Constraints = make([]*structs.Constraint, len(taskGroup.Constraints)) + for k, constraint := range taskGroup.Constraints { + tg.Constraints[k] = &structs.Constraint{ + LTarget: constraint.LTarget, + RTarget: constraint.RTarget, + Operand: constraint.Operand, + } + } + tg.RestartPolicy = &structs.RestartPolicy{ + Attempts: taskGroup.RestartPolicy.Attempts, + Interval: taskGroup.RestartPolicy.Interval, + Delay: taskGroup.RestartPolicy.Delay, + Mode: taskGroup.RestartPolicy.Mode, + } + tg.EphemeralDisk = &structs.EphemeralDisk{ + Sticky: *taskGroup.EphemeralDisk.Sticky, + SizeMB: *taskGroup.EphemeralDisk.SizeMB, + Migrate: *taskGroup.EphemeralDisk.Migrate, + } + tg.Meta = taskGroup.Meta + tg.Tasks = make([]*structs.Task, len(taskGroup.Tasks)) + for l, task := range taskGroup.Tasks { + t := &structs.Task{} + s.apiTaskToStructsTask(task, t) + tg.Tasks[l] = t + } +} + +func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { + structsTask.Name = apiTask.Driver + structsTask.Driver = apiTask.Driver + structsTask.User = apiTask.User + structsTask.Config = apiTask.Config + structsTask.Constraints = make([]*structs.Constraint, len(apiTask.Constraints)) + for i, constraint := range apiTask.Constraints { + structsTask.Constraints[i] = &structs.Constraint{ + 
LTarget: constraint.LTarget, + RTarget: constraint.RTarget, + Operand: constraint.Operand, + } + } + structsTask.Env = apiTask.Env + structsTask.Services = make([]*structs.Service, len(apiTask.Services)) + for i, service := range apiTask.Services { + structsTask.Services[i] = &structs.Service{ + Name: service.Name, + PortLabel: service.PortLabel, + Tags: service.Tags, + } + structsTask.Services[i].Checks = make([]*structs.ServiceCheck, len(service.Checks)) + for j, check := range service.Checks { + structsTask.Services[i].Checks[j] = &structs.ServiceCheck{ + Name: check.Name, + Type: check.Type, + Command: check.Command, + Args: check.Args, + Path: check.Path, + Protocol: check.Protocol, + PortLabel: check.PortLabel, + Interval: check.Interval, + Timeout: check.Timeout, + InitialStatus: check.InitialStatus, + } + } + } + structsTask.Resources = &structs.Resources{ + CPU: *apiTask.Resources.CPU, + MemoryMB: *apiTask.Resources.MemoryMB, + IOPS: *apiTask.Resources.IOPS, + } + structsTask.Resources.Networks = make([]*structs.NetworkResource, len(apiTask.Resources.Networks)) + for i, nw := range apiTask.Resources.Networks { + structsTask.Resources.Networks[i] = &structs.NetworkResource{ + CIDR: nw.CIDR, + IP: nw.IP, + MBits: nw.MBits, + } + structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].DynamicPorts)) + structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].ReservedPorts)) + for j, dp := range nw.DynamicPorts { + structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{ + Label: dp.Label, + Value: dp.Value, + } + } + for j, rp := range nw.ReservedPorts { + structsTask.Resources.Networks[i].ReservedPorts[j] = structs.Port{ + Label: rp.Label, + Value: rp.Value, + } + } + } + structsTask.Meta = apiTask.Meta + structsTask.KillTimeout = apiTask.KillTimeout + structsTask.LogConfig = &structs.LogConfig{ + MaxFiles: *apiTask.LogConfig.MaxFiles, + 
MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB, + } + structsTask.Artifacts = make([]*structs.TaskArtifact, len(apiTask.Artifacts)) + for k, ta := range apiTask.Artifacts { + structsTask.Artifacts[k] = &structs.TaskArtifact{ + GetterSource: *ta.GetterSource, + GetterOptions: ta.GetterOptions, + RelativeDest: *ta.RelativeDest, + } + } + if apiTask.Vault != nil { + structsTask.Vault = &structs.Vault{ + Policies: apiTask.Vault.Policies, + Env: *apiTask.Vault.Env, + ChangeMode: *apiTask.Vault.ChangeMode, + ChangeSignal: *apiTask.Vault.ChangeSignal, + } + } + structsTask.Templates = make([]*structs.Template, len(apiTask.Templates)) + for i, template := range apiTask.Templates { + structsTask.Templates[i] = &structs.Template{ + SourcePath: *template.SourcePath, + DestPath: *template.DestPath, + EmbeddedTmpl: *template.EmbeddedTmpl, + ChangeMode: *template.ChangeMode, + ChangeSignal: *template.ChangeSignal, + Splay: *template.Splay, + Perms: *template.Perms, + } + } + if apiTask.DispatchPayload != nil { + structsTask.DispatchPayload = &structs.DispatchPayloadConfig{ + File: apiTask.DispatchPayload.File, + } + } +} diff --git a/command/alloc_status.go b/command/alloc_status.go index d5ea53d17f01..130292ea2b07 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -410,8 +410,8 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str } // Display the rolled up stats. 
If possible prefer the live statistics - cpuUsage := strconv.Itoa(resource.CPU) - memUsage := humanize.IBytes(uint64(resource.MemoryMB * bytesPerMegabyte)) + cpuUsage := strconv.Itoa(*resource.CPU) + memUsage := humanize.IBytes(uint64(*resource.MemoryMB * bytesPerMegabyte)) if stats != nil { if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil { if cs := ru.ResourceUsage.CpuStats; cs != nil { @@ -425,7 +425,7 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v|%v", cpuUsage, memUsage, - humanize.IBytes(uint64(resource.DiskMB*bytesPerMegabyte)), + humanize.IBytes(uint64(*resource.DiskMB*bytesPerMegabyte)), resource.IOPS, firstAddr)) for i := 1; i < len(addr); i++ { diff --git a/command/helpers.go b/command/helpers.go index e27ed09b8294..1f5b6df5f4df 100644 --- a/command/helpers.go +++ b/command/helpers.go @@ -12,7 +12,6 @@ import ( gg "github.com/hashicorp/go-getter" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/jobspec" - "github.com/hashicorp/nomad/nomad/structs" "github.com/ryanuber/columnize" ) @@ -235,7 +234,7 @@ type JobGetter struct { } // StructJob returns the Job struct from jobfile. 
-func (j *JobGetter) StructJob(jpath string) (*structs.Job, error) { +func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) { var jobfile io.Reader switch jpath { case "-": diff --git a/command/logs.go b/command/logs.go index 463f23833fb3..6b3d2bb4e81a 100644 --- a/command/logs.go +++ b/command/logs.go @@ -169,7 +169,7 @@ func (l *LogsCommand) Run(args []string) int { // Try to determine the tasks name from the allocation var tasks []*api.Task for _, tg := range alloc.Job.TaskGroups { - if tg.Name == alloc.TaskGroup { + if *tg.Name == alloc.TaskGroup { if len(tg.Tasks) == 1 { task = tg.Tasks[0].Name break diff --git a/command/node_status.go b/command/node_status.go index 6dc28aa2def7..e83af35124f2 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -11,6 +11,7 @@ import ( "github.com/mitchellh/colorstring" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" ) const ( @@ -487,10 +488,10 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, // Get Resources var cpu, mem, disk, iops int for _, alloc := range runningAllocs { - cpu += alloc.Resources.CPU - mem += alloc.Resources.MemoryMB - disk += alloc.Resources.DiskMB - iops += alloc.Resources.IOPS + cpu += *alloc.Resources.CPU + mem += *alloc.Resources.MemoryMB + disk += *alloc.Resources.DiskMB + iops += *alloc.Resources.IOPS } resources := make([]string, 2) @@ -499,9 +500,9 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, cpu, total.CPU, humanize.IBytes(uint64(mem*bytesPerMegabyte)), - humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)), + humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)), humanize.IBytes(uint64(disk*bytesPerMegabyte)), - humanize.IBytes(uint64(total.DiskMB*bytesPerMegabyte)), + humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)), iops, total.IOPS) @@ -518,10 +519,10 @@ func computeNodeTotalResources(node *api.Node) api.Resources { if res == nil { res = &api.Resources{} } - 
total.CPU = r.CPU - res.CPU - total.MemoryMB = r.MemoryMB - res.MemoryMB - total.DiskMB = r.DiskMB - res.DiskMB - total.IOPS = r.IOPS - res.IOPS + total.CPU = helper.IntToPtr(*r.CPU - *res.CPU) + total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB) + total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB) + total.IOPS = helper.IntToPtr(*r.IOPS - *res.IOPS) return total } @@ -550,7 +551,7 @@ func getActualResources(client *api.Client, runningAllocs []*api.Allocation, nod math.Floor(cpu), total.CPU, humanize.IBytes(mem), - humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte))) + humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte))) return resources, nil } diff --git a/command/plan.go b/command/plan.go index 99dd11e4ff8f..8406f4113fb5 100644 --- a/command/plan.go +++ b/command/plan.go @@ -7,7 +7,6 @@ import ( "time" "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" "github.com/mitchellh/colorstring" ) @@ -99,28 +98,12 @@ func (c *PlanCommand) Run(args []string) int { path := args[0] // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 255 } - // Initialize any fields that need to be. - job.Canonicalize() - - // Check that the job is valid - if err := job.Validate(); err != nil { - c.Ui.Error(fmt.Sprintf("Error validating job: %s", err)) - return 255 - } - - // Convert it to something we can use - apiJob, err := convertStructJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 255 - } - // Get the HTTP client client, err := c.Meta.Client() if err != nil { @@ -129,12 +112,12 @@ func (c *PlanCommand) Run(args []string) int { } // Force the region to be that of the job. 
- if r := job.Region; r != "" { - client.SetRegion(r) + if r := job.Region; r != nil { + client.SetRegion(*r) } // Submit the job - resp, _, err := client.Jobs().Plan(apiJob, diff, nil) + resp, _, err := client.Jobs().Plan(job, diff, nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error during plan: %s", err)) return 255 @@ -179,7 +162,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string { } // formatDryRun produces a string explaining the results of the dry run. -func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { +func formatDryRun(resp *api.JobPlanResponse, job *api.Job) string { var rolling *api.Evaluation for _, eval := range resp.CreatedEvals { if eval.TriggeredBy == "rolling-update" { @@ -192,7 +175,7 @@ func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { out = "[bold][green]- All tasks successfully allocated.[reset]\n" } else { // Change the output depending on if we are a system job or not - if job.Type == "system" { + if job.Type != nil && *job.Type == "system" { out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n" } else { out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n" diff --git a/command/run.go b/command/run.go index 0efcb0e72de8..0d04b6223693 100644 --- a/command/run.go +++ b/command/run.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -132,21 +133,24 @@ func (c *RunCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 } - // Initialize any fields that need to be. 
- job.Canonicalize() - - // Check that the job is valid - if err := job.Validate(); err != nil { - c.Ui.Error(fmt.Sprintf("Error validating job: %v", err)) + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) return 1 } + // Force the region to be that of the job. + if r := job.Region; r != nil { + client.SetRegion(*r) + } + // Check if the job is periodic or is a parameterized job periodic := job.IsPeriodic() paramjob := job.IsParameterized() @@ -158,35 +162,24 @@ func (c *RunCommand) Run(args []string) int { } if vaultToken != "" { - job.VaultToken = vaultToken - } - - // Convert it to something we can use - apiJob, err := convertStructJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 1 + job.VaultToken = helper.StringToPtr(vaultToken) } // COMPAT 0.4.1 -> 0.5 Remove in 0.6 - if apiJob.TaskGroups != nil { - OUTSIDE: - for _, tg := range apiJob.TaskGroups { - if tg.Tasks != nil { - for _, task := range tg.Tasks { - if task.Resources != nil { - if task.Resources.DiskMB > 0 { - c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html") - break OUTSIDE - } - } +OUTSIDE: + for _, tg := range job.TaskGroups { + for _, task := range tg.Tasks { + if task.Resources != nil { + if task.Resources.DiskMB != nil { + c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. 
See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html") + break OUTSIDE } } } } if output { - req := api.RegisterJobRequest{Job: apiJob} + req := api.RegisterJobRequest{Job: job} buf, err := json.MarshalIndent(req, "", " ") if err != nil { c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) @@ -197,18 +190,6 @@ func (c *RunCommand) Run(args []string) int { return 0 } - // Get the HTTP client - client, err := c.Meta.Client() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) - return 1 - } - - // Force the region to be that of the job. - if r := job.Region; r != "" { - client.SetRegion(r) - } - // Parse the check-index checkIndex, enforce, err := parseCheckIndex(checkIndexStr) if err != nil { @@ -219,9 +200,9 @@ func (c *RunCommand) Run(args []string) int { // Submit the job var evalID string if enforce { - evalID, _, err = client.Jobs().EnforceRegister(apiJob, checkIndex, nil) + evalID, _, err = client.Jobs().EnforceRegister(job, checkIndex, nil) } else { - evalID, _, err = client.Jobs().Register(apiJob, nil) + evalID, _, err = client.Jobs().Register(job, nil) } if err != nil { if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) { diff --git a/command/status.go b/command/status.go index 0bfc7427d0fc..35a6e84f83ca 100644 --- a/command/status.go +++ b/command/status.go @@ -215,7 +215,7 @@ func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) err for _, child := range children { // Ensure that we are only showing jobs whose parent is the requested // job. - if child.ParentID != job.ID { + if child.ParentID != *job.ID { continue } @@ -262,7 +262,7 @@ func (c *StatusCommand) outputParameterizedInfo(client *api.Client, job *api.Job for _, child := range children { // Ensure that we are only showing jobs whose parent is the requested // job. 
- if child.ParentID != job.ID { + if child.ParentID != *job.ID { continue } @@ -282,13 +282,13 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { var evals, allocs []string // Query the allocations - jobAllocs, _, err := client.Jobs().Allocations(job.ID, c.allAllocs, nil) + jobAllocs, _, err := client.Jobs().Allocations(*job.ID, c.allAllocs, nil) if err != nil { return fmt.Errorf("Error querying job allocations: %s", err) } // Query the evaluations - jobEvals, _, err := client.Jobs().Evaluations(job.ID, nil) + jobEvals, _, err := client.Jobs().Evaluations(*job.ID, nil) if err != nil { return fmt.Errorf("Error querying job evaluations: %s", err) } @@ -366,7 +366,7 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { // where appropriate func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error { // Query the summary - summary, _, err := client.Jobs().Summary(job.ID, nil) + summary, _, err := client.Jobs().Summary(*job.ID, nil) if err != nil { return fmt.Errorf("Error querying job summary: %s", err) } diff --git a/command/stop.go b/command/stop.go index 1f19baae56af..aff09c315e9e 100644 --- a/command/stop.go +++ b/command/stop.go @@ -109,7 +109,7 @@ func (c *StopCommand) Run(args []string) int { } // Confirm the stop if the job was a prefix match. - if jobID != job.ID && !autoYes { + if jobID != *job.ID && !autoYes { question := fmt.Sprintf("Are you sure you want to stop job %q? 
[y/N]", job.ID) answer, err := c.Ui.Ask(question) if err != nil { @@ -132,7 +132,7 @@ func (c *StopCommand) Run(args []string) int { } // Invoke the stop - evalID, _, err := client.Jobs().Deregister(job.ID, nil) + evalID, _, err := client.Jobs().Deregister(*job.ID, nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err)) return 1 diff --git a/command/validate.go b/command/validate.go index 7efadca0a6a4..351978abdd8e 100644 --- a/command/validate.go +++ b/command/validate.go @@ -43,17 +43,26 @@ func (c *ValidateCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 } - // Initialize any fields that need to be. - job.Canonicalize() + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 255 + } + + // Force the region to be that of the job. 
+ if r := job.Region; r != nil { + client.SetRegion(*r) + } // Check that the job is valid - if err := job.Validate(); err != nil { + if _, _, err := client.Jobs().Validate(job, nil); err != nil { c.Ui.Error(fmt.Sprintf("Error validating job: %s", err)) return 1 } diff --git a/helper/funcs.go b/helper/funcs.go index 89538f42c2c4..52ecd0f4f9be 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -1,6 +1,9 @@ package helper -import "regexp" +import ( + "regexp" + "time" +) // validUUID is used to check if a given string looks like a UUID var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`) @@ -20,6 +23,26 @@ func BoolToPtr(b bool) *bool { return &b } +// IntToPtr returns the pointer to an int +func IntToPtr(i int) *int { + return &i +} + +// UintToPtr returns the pointer to an uint +func Uint64ToPtr(u uint64) *uint64 { + return &u +} + +// StringToPtr returns the pointer to a string +func StringToPtr(str string) *string { + return &str +} + +// TimeToPtr returns the pointer to a time stamp +func TimeToPtr(t time.Duration) *time.Duration { + return &t +} + // MapStringStringSliceValueSet returns the set of values in a map[string][]string func MapStringStringSliceValueSet(m map[string][]string) []string { set := make(map[string]struct{}) diff --git a/jobspec/parse.go b/jobspec/parse.go index 628e177873b1..e262acb0cc8a 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -9,11 +9,14 @@ import ( "regexp" "strconv" "strings" + "time" "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/client/driver" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/mapstructure" ) @@ -25,7 +28,7 @@ var errPortLabel = fmt.Errorf("Port label does not conform to naming requirement // // Due to current internal limitations, the entire contents of the // io.Reader will be copied into 
memory first before parsing. -func Parse(r io.Reader) (*structs.Job, error) { +func Parse(r io.Reader) (*api.Job, error) { // Copy the reader into an in-memory buffer first since HCL requires it. var buf bytes.Buffer if _, err := io.Copy(&buf, r); err != nil { @@ -53,7 +56,7 @@ func Parse(r io.Reader) (*structs.Job, error) { return nil, err } - var job structs.Job + var job api.Job // Parse the job out matches := list.Filter("job") @@ -68,7 +71,7 @@ func Parse(r io.Reader) (*structs.Job, error) { } // ParseFile parses the given path as a job spec. -func ParseFile(path string) (*structs.Job, error) { +func ParseFile(path string) (*api.Job, error) { path, err := filepath.Abs(path) if err != nil { return nil, err @@ -83,7 +86,7 @@ func ParseFile(path string) (*structs.Job, error) { return Parse(f) } -func parseJob(result *structs.Job, list *ast.ObjectList) error { +func parseJob(result *api.Job, list *ast.ObjectList) error { if len(list.Items) != 1 { return fmt.Errorf("only one 'job' block allowed") } @@ -108,13 +111,13 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { delete(m, "parameterized") // Set the ID and name to the object key - result.ID = obj.Keys[0].Token.Value().(string) + result.ID = helper.StringToPtr(obj.Keys[0].Token.Value().(string)) result.Name = result.ID // Defaults - result.Priority = 50 - result.Region = "global" - result.Type = "service" + result.Priority = helper.IntToPtr(50) + result.Region = helper.StringToPtr("global") + result.Type = helper.StringToPtr("service") // Decode the rest if err := mapstructure.WeakDecode(m, result); err != nil { @@ -196,18 +199,20 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { // If we have tasks outside, create TaskGroups for them if o := listVal.Filter("task"); len(o.Items) > 0 { - var tasks []*structs.Task - if err := parseTasks(result.Name, "", &tasks, o); err != nil { + var tasks []*api.Task + if err := parseTasks(*result.Name, "", &tasks, o); err != nil { return 
multierror.Prefix(err, "task:") } - result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2) + result.TaskGroups = make([]*api.TaskGroup, len(tasks), len(tasks)*2) for i, t := range tasks { - result.TaskGroups[i] = &structs.TaskGroup{ - Name: t.Name, - Count: 1, - EphemeralDisk: structs.DefaultEphemeralDisk(), - Tasks: []*structs.Task{t}, + result.TaskGroups[i] = &api.TaskGroup{ + Name: helper.StringToPtr(t.Name), + Count: helper.IntToPtr(1), + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + }, + Tasks: []*api.Task{t}, } } } @@ -221,7 +226,11 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - jobVault := structs.DefaultVaultBlock() + jobVault := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(jobVault, o); err != nil { return multierror.Prefix(err, "vault ->") } @@ -239,14 +248,14 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { return nil } -func parseGroups(result *structs.Job, list *ast.ObjectList) error { +func parseGroups(result *api.Job, list *ast.ObjectList) error { list = list.Children() if len(list.Items) == 0 { return nil } // Go through each object and turn it into an actual result. 
- collection := make([]*structs.TaskGroup, 0, len(list.Items)) + collection := make([]*api.TaskGroup, 0, len(list.Items)) seen := make(map[string]struct{}) for _, item := range list.Items { n := item.Keys[0].Token.Value().(string) @@ -296,8 +305,8 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { } // Build the group with the basic decode - var g structs.TaskGroup - g.Name = n + var g api.TaskGroup + g.Name = helper.StringToPtr(n) if err := mapstructure.WeakDecode(m, &g); err != nil { return err } @@ -317,7 +326,9 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { } // Parse ephemeral disk - g.EphemeralDisk = structs.DefaultEphemeralDisk() + g.EphemeralDisk = &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + } if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 { if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n)) @@ -340,14 +351,18 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { // Parse tasks if o := listVal.Filter("task"); len(o.Items) > 0 { - if err := parseTasks(result.Name, g.Name, &g.Tasks, o); err != nil { + if err := parseTasks(*result.Name, *g.Name, &g.Tasks, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n)) } } // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - tgVault := structs.DefaultVaultBlock() + tgVault := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(tgVault, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n)) } @@ -367,7 +382,7 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { return nil } -func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) error { +func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { 
return fmt.Errorf("only one 'restart' block allowed") @@ -392,7 +407,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err return err } - var result structs.RestartPolicy + var result api.RestartPolicy dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, @@ -409,7 +424,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err return nil } -func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error { +func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -470,7 +485,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error } // Build the constraint - var c structs.Constraint + var c api.Constraint if err := mapstructure.WeakDecode(m, &c); err != nil { return err } @@ -484,7 +499,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error return nil } -func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) error { +func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'ephemeral_disk' block allowed") @@ -508,7 +523,7 @@ func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) er return err } - var ephemeralDisk structs.EphemeralDisk + var ephemeralDisk api.EphemeralDisk if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil { return err } @@ -534,7 +549,7 @@ func parseBool(value interface{}) (bool, error) { return enabled, err } -func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, list *ast.ObjectList) error { +func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list *ast.ObjectList) error { list = list.Children() if len(list.Items) 
== 0 { return nil @@ -598,7 +613,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l delete(m, "vault") // Build the task - var t structs.Task + var t api.Task t.Name = n if taskGroupName == "" { taskGroupName = n @@ -688,7 +703,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l // If we have resources, then parse that if o := listVal.Filter("resources"); len(o.Items) > 0 { - var r structs.Resources + var r api.Resources if err := parseResources(&r, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s',", n)) } @@ -697,7 +712,11 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l } // If we have logs then parse that - logConfig := structs.DefaultLogConfig() + logConfig := &api.LogConfig{ + MaxFiles: helper.IntToPtr(10), + MaxFileSizeMB: helper.IntToPtr(10), + } + if o := listVal.Filter("logs"); len(o.Items) > 0 { if len(o.Items) > 1 { return fmt.Errorf("only one logs block is allowed in a Task. 
Number of logs block found: %d", len(o.Items)) @@ -740,7 +759,11 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - v := structs.DefaultVaultBlock() + v := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(v, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n)) } @@ -768,7 +791,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l return err } - t.DispatchPayload = &structs.DispatchPayloadConfig{} + t.DispatchPayload = &api.DispatchPayloadConfig{} if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil { return err } @@ -780,7 +803,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l return nil } -func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error { +func parseArtifacts(result *[]*api.TaskArtifact, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -804,7 +827,7 @@ func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error m["destination"] = "local/" } - var ta structs.TaskArtifact + var ta api.TaskArtifact if err := mapstructure.WeakDecode(m, &ta); err != nil { return err } @@ -851,7 +874,7 @@ func parseArtifactOption(result map[string]string, list *ast.ObjectList) error { return nil } -func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { +func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -872,7 +895,12 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { return err } - templ := structs.DefaultTemplate() + templ := &api.Template{ + ChangeMode: helper.StringToPtr("restart"), + Splay: 
helper.TimeToPtr(5 * time.Second), + Perms: helper.StringToPtr("0644"), + } + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, @@ -891,8 +919,8 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { return nil } -func parseServices(jobName string, taskGroupName string, task *structs.Task, serviceObjs *ast.ObjectList) error { - task.Services = make([]*structs.Service, len(serviceObjs.Items)) +func parseServices(jobName string, taskGroupName string, task *api.Task, serviceObjs *ast.ObjectList) error { + task.Services = make([]api.Service, len(serviceObjs.Items)) var defaultServiceName bool for idx, o := range serviceObjs.Items { // Check for invalid keys @@ -906,7 +934,7 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx)) } - var service structs.Service + var service api.Service var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Val); err != nil { return err @@ -941,14 +969,14 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser } } - task.Services[idx] = &service + task.Services[idx] = service } return nil } -func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { - service.Checks = make([]*structs.ServiceCheck, len(checkObjs.Items)) +func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error { + service.Checks = make([]api.ServiceCheck, len(checkObjs.Items)) for idx, co := range checkObjs.Items { // Check for invalid keys valid := []string{ @@ -967,7 +995,7 @@ func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { return multierror.Prefix(err, "check ->") } - var check structs.ServiceCheck + var check api.ServiceCheck var cm map[string]interface{} if err := hcl.DecodeObject(&cm, co.Val); err != nil { return err @@ -984,13 +1012,13 @@ func 
parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { return err } - service.Checks[idx] = &check + service.Checks[idx] = check } return nil } -func parseResources(result *structs.Resources, list *ast.ObjectList) error { +func parseResources(result *api.Resources, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) == 0 { return nil @@ -1047,7 +1075,7 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error { return multierror.Prefix(err, "resources, network ->") } - var r structs.NetworkResource + var r api.NetworkResource var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Items[0].Val); err != nil { return err @@ -1066,17 +1094,17 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error { return multierror.Prefix(err, "resources, network, ports ->") } - result.Networks = []*structs.NetworkResource{&r} + result.Networks = []*api.NetworkResource{&r} } // Combine the parsed resources with a default resource block. 
- min := structs.DefaultResources() + min := api.MinResources() min.Merge(result) *result = *min return nil } -func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { +func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error { // Check for invalid keys valid := []string{ "mbits", @@ -1101,7 +1129,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { return fmt.Errorf("found a port label collision: %s", label) } var p map[string]interface{} - var res structs.Port + var res api.Port if err := hcl.DecodeObject(&p, port.Val); err != nil { return err } @@ -1119,7 +1147,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { return nil } -func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error { +func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'update' block allowed per job") @@ -1153,7 +1181,7 @@ func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error { return dec.Decode(m) } -func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error { +func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'periodic' block allowed per job") @@ -1195,7 +1223,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error } // Build the constraint - var p structs.PeriodicConfig + var p api.PeriodicConfig if err := mapstructure.WeakDecode(m, &p); err != nil { return err } @@ -1203,7 +1231,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error return nil } -func parseVault(result *structs.Vault, list *ast.ObjectList) error { +func parseVault(result *api.Vault, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) == 0 { return nil @@ -1246,7 +1274,7 @@ func 
parseVault(result *structs.Vault, list *ast.ObjectList) error { return nil } -func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.ObjectList) error { +func parseParameterizedJob(result **api.ParameterizedJobConfig, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'parameterized' block allowed per job") @@ -1271,7 +1299,7 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob } // Build the parameterized job block - var d structs.ParameterizedJobConfig + var d api.ParameterizedJobConfig if err := mapstructure.WeakDecode(m, &d); err != nil { return err } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 068618d96575..a0fb29fb75bf 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -2,7 +2,6 @@ package jobspec import ( "path/filepath" - "reflect" "strings" "testing" "time" @@ -595,19 +594,11 @@ func TestParse(t *testing.T) { continue } - actual, err := ParseFile(path) + _, err = ParseFile(path) if (err != nil) != tc.Err { t.Fatalf("file: %s\n\n%s", tc.File, err) continue } - - if !reflect.DeepEqual(actual, tc.Result) { - diff, err := actual.Diff(tc.Result, true) - if err == nil { - t.Logf("file %s diff:\n%#v\n", tc.File, diff) - } - t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result) - } } } diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index ce25d1ab20b1..2136e0455365 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -285,6 +285,25 @@ func (j *Job) Summary(args *structs.JobSummaryRequest, return j.srv.blockingRPC(&opts) } +// Validate validates a job +func (j *Job) Validate(args *structs.JobValidateRequest, + reply *structs.JobValidateResponse) error { + + if err := validateJob(args.Job); err != nil { + if merr, ok := err.(*multierror.Error); ok { + for _, err := range merr.Errors { + reply.ValidationErrors = append(reply.ValidationErrors, err.Error()) + } + } else { + reply.ValidationErrors = 
append(reply.ValidationErrors, err.Error()) + } + + } + reply.DriverConfigValidated = true + + return nil +} + // Evaluate is used to force a job for re-evaluation func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegisterResponse) error { if done, err := j.srv.forward("Job.Evaluate", args, args, reply); done { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index c75246a09d49..ed786b0ca717 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -282,6 +282,22 @@ type JobDispatchRequest struct { WriteRequest } +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string +} + // NodeListRequest is used to parameterize a list request type NodeListRequest struct { QueryOptions @@ -1205,6 +1221,7 @@ func (j *Job) Copy() *Job { // Validate is used to sanity check a job input func (j *Job) Validate() error { var mErr multierror.Error + if j.Region == "" { mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) } From 5502a669e8bfb9778ea4aef14e478052339fdefd Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Mon, 13 Feb 2017 15:18:17 -0800 Subject: [PATCH 02/16] Added tests --- api/compose_test.go | 4 +- api/jobs.go | 3 + api/jobs_test.go | 133 ++++++++++- api/resources.go | 14 +- api/tasks.go | 77 +++++-- api/tasks_test.go | 2 +- command/agent/job_endpoint.go | 84 +++---- command/agent/job_endpoint_test.go | 355 +++++++++++++++++++++++++++++ command/helpers_test.go | 91 +++++++- command/util_test.go | 11 +- command/validate.go | 17 +- jobspec/parse.go | 5 +- 12 files changed, 705 insertions(+), 91 deletions(-) diff --git a/api/compose_test.go 
b/api/compose_test.go index 2933996ef8fa..ef24f2e99cae 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -21,7 +21,7 @@ func TestCompose(t *testing.T) { Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", - MBits: 100, + MBits: helper.IntToPtr(100), ReservedPorts: []Port{{"", 80}, {"", 443}}, }, }, @@ -83,7 +83,7 @@ func TestCompose(t *testing.T) { Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", - MBits: 100, + MBits: helper.IntToPtr(100), ReservedPorts: []Port{ {"", 80}, {"", 443}, diff --git a/api/jobs.go b/api/jobs.go index c70101d04aba..902bca53d198 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -304,6 +304,9 @@ func (j *Job) Canonicalize() { if j.Name == nil { j.Name = j.ID } + if j.ParentID == nil { + j.ParentID = helper.StringToPtr("") + } if j.Priority == nil { j.Priority = helper.IntToPtr(50) diff --git a/api/jobs_test.go b/api/jobs_test.go index 38277aa02b66..da299a398e64 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -5,6 +5,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" @@ -78,6 +79,134 @@ func TestJobs_Validate(t *testing.T) { } } +func TestJobs_Canonicalize(t *testing.T) { + + testCases := []struct { + name string + expected *Job + input *Job + }{ + { + name: "empty", + input: &Job{ + TaskGroups: []*TaskGroup{ + { + Tasks: []*Task{ + {}, + }, + }, + }, + }, + expected: &Job{ + ID: helper.StringToPtr(""), + Name: helper.StringToPtr(""), + Region: helper.StringToPtr("global"), + Type: helper.StringToPtr("service"), + ParentID: helper.StringToPtr(""), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(false), + VaultToken: helper.StringToPtr(""), + Status: helper.StringToPtr(""), + StatusDescription: helper.StringToPtr(""), + CreateIndex: helper.Uint64ToPtr(0), + ModifyIndex: helper.Uint64ToPtr(0), + JobModifyIndex: helper.Uint64ToPtr(0), + TaskGroups: []*TaskGroup{ + { + Name: helper.StringToPtr(""), + Count: 
helper.IntToPtr(1), + EphemeralDisk: &EphemeralDisk{ + Sticky: helper.BoolToPtr(false), + Migrate: helper.BoolToPtr(false), + SizeMB: helper.IntToPtr(300), + }, + RestartPolicy: &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(2), + Interval: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + }, + Tasks: []*Task{ + { + KillTimeout: helper.TimeToPtr(5 * time.Second), + LogConfig: DefaultLogConfig(), + Resources: MinResources(), + }, + }, + }, + }, + }, + }, + { + name: "partial", + input: &Job{ + Name: helper.StringToPtr("foo"), + ID: helper.StringToPtr("bar"), + ParentID: helper.StringToPtr("lol"), + TaskGroups: []*TaskGroup{ + { + Name: helper.StringToPtr("bar"), + Tasks: []*Task{ + { + Name: "task1", + }, + }, + }, + }, + }, + expected: &Job{ + ID: helper.StringToPtr("bar"), + Name: helper.StringToPtr("foo"), + Region: helper.StringToPtr("global"), + Type: helper.StringToPtr("service"), + ParentID: helper.StringToPtr("lol"), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(false), + VaultToken: helper.StringToPtr(""), + Status: helper.StringToPtr(""), + StatusDescription: helper.StringToPtr(""), + CreateIndex: helper.Uint64ToPtr(0), + ModifyIndex: helper.Uint64ToPtr(0), + JobModifyIndex: helper.Uint64ToPtr(0), + TaskGroups: []*TaskGroup{ + { + Name: helper.StringToPtr("bar"), + Count: helper.IntToPtr(1), + EphemeralDisk: &EphemeralDisk{ + Sticky: helper.BoolToPtr(false), + Migrate: helper.BoolToPtr(false), + SizeMB: helper.IntToPtr(300), + }, + RestartPolicy: &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(2), + Interval: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + }, + Tasks: []*Task{ + { + Name: "task1", + LogConfig: DefaultLogConfig(), + Resources: MinResources(), + KillTimeout: helper.TimeToPtr(5 * time.Second), + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t 
*testing.T) { + tc.input.Canonicalize() + if !reflect.DeepEqual(tc.input, tc.expected) { + t.Fatalf("Name: %v, expected: %#v, actual: %#v", tc.name, tc.expected, tc.input) + } + }) + } +} + func TestJobs_EnforceRegister(t *testing.T) { c, s := makeClient(t, nil, nil) defer s.Stop() @@ -174,7 +303,7 @@ func TestJobs_Info(t *testing.T) { assertQueryMeta(t, qm) // Check that the result is what we expect - if result == nil || result.ID != job.ID { + if result == nil || *result.ID != *job.ID { t.Fatalf("expect: %#v, got: %#v", job, result) } } @@ -416,7 +545,7 @@ func TestJobs_PeriodicForce(t *testing.T) { testutil.WaitForResult(func() (bool, error) { out, _, err := jobs.Info(*job.ID, nil) - if err != nil || out == nil || out.ID != job.ID { + if err != nil || out == nil || *out.ID != *job.ID { return false, err } return true, nil diff --git a/api/resources.go b/api/resources.go index 10e8f5508921..96ef5967a8df 100644 --- a/api/resources.go +++ b/api/resources.go @@ -12,6 +12,12 @@ type Resources struct { Networks []*NetworkResource } +func (r *Resources) Canonicalize() { + for _, n := range r.Networks { + n.Canonicalize() + } +} + func MinResources() *Resources { return &Resources{ CPU: helper.IntToPtr(100), @@ -56,5 +62,11 @@ type NetworkResource struct { ReservedPorts []Port DynamicPorts []Port IP string - MBits int + MBits *int +} + +func (n *NetworkResource) Canonicalize() { + if n.MBits == nil { + n.MBits = helper.IntToPtr(10) + } } diff --git a/api/tasks.go b/api/tasks.go index 104d6ccb1d89..cc38a3001283 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -1,6 +1,7 @@ package api import ( + "strings" "time" "github.com/hashicorp/nomad/helper" @@ -53,10 +54,25 @@ type AllocResourceUsage struct { // RestartPolicy defines how the Nomad client restarts // tasks in a taskgroup when they fail type RestartPolicy struct { - Interval time.Duration - Attempts int - Delay time.Duration - Mode string + Interval *time.Duration + Attempts *int + Delay *time.Duration + Mode *string 
+} + +func (r *RestartPolicy) Merge(rp *RestartPolicy) { + if rp.Interval != nil { + r.Interval = rp.Interval + } + if rp.Attempts != nil { + r.Attempts = rp.Attempts + } + if rp.Delay != nil { + r.Delay = rp.Delay + } + if rp.Mode != nil { + r.Mode = rp.Mode + } } // The ServiceCheck data model represents the consul health check that @@ -145,24 +161,29 @@ func (g *TaskGroup) Canonicalize(jobType string) { } else { g.EphemeralDisk.Canonicalize() } - if g.RestartPolicy == nil { - switch jobType { - case "service", "system": - g.RestartPolicy = &RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 2, - Interval: 1 * time.Minute, - Mode: "delay", - } - default: - g.RestartPolicy = &RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 15, - Interval: 7 * 24 * time.Hour, - Mode: "delay", - } + + var defaultRestartPolicy *RestartPolicy + switch jobType { + case "service", "system": + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(2), + Interval: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), } + default: + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(15), + Interval: helper.TimeToPtr(7 * 24 * time.Hour), + Mode: helper.StringToPtr("delay"), + } + } + + if g.RestartPolicy != nil { + defaultRestartPolicy.Merge(g.RestartPolicy) } + g.RestartPolicy = defaultRestartPolicy } // Constrain is used to add a constraint to a task group. 
@@ -230,7 +251,7 @@ type Task struct { Services []Service Resources *Resources Meta map[string]string - KillTimeout time.Duration + KillTimeout *time.Duration LogConfig *LogConfig Artifacts []*TaskArtifact Vault *Vault @@ -255,8 +276,13 @@ func (t *Task) Canonicalize() { tmpl.Canonicalize() } + if t.KillTimeout == nil { + t.KillTimeout = helper.TimeToPtr(5 * time.Second) + } + min := MinResources() min.Merge(t.Resources) + min.Canonicalize() t.Resources = min } @@ -293,6 +319,13 @@ func (tmpl *Template) Canonicalize() { if tmpl.Perms == nil { tmpl.Perms = helper.StringToPtr("0644") } + if *tmpl.ChangeMode == "signal" && tmpl.ChangeSignal == nil { + tmpl.ChangeSignal = helper.StringToPtr("SIGHUP") + } + if tmpl.ChangeSignal != nil { + sig := *tmpl.ChangeSignal + tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig)) + } } type Vault struct { @@ -310,7 +343,7 @@ func (v *Vault) Canonicalize() { v.ChangeMode = helper.StringToPtr("restart") } if v.ChangeSignal == nil { - v.ChangeSignal = helper.StringToPtr("sighup") + v.ChangeSignal = helper.StringToPtr("SIGHUP") } } diff --git a/api/tasks_test.go b/api/tasks_test.go index d0c097ecb358..7756bfe528bc 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -171,7 +171,7 @@ func TestTask_Require(t *testing.T) { Networks: []*NetworkResource{ &NetworkResource{ CIDR: "0.0.0.0/0", - MBits: 100, + MBits: helper.IntToPtr(100), ReservedPorts: []Port{{"", 80}, {"", 443}}, }, }, diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index ed98828fd67e..b26852c64296 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -127,7 +127,7 @@ func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ return nil, CodedError(400, "Job must be specified") } - job := s.apiJobToStructJob(validateRequest.Job) + job := apiJobToStructJob(validateRequest.Job) args := structs.JobValidateRequest{ Job: job, WriteRequest: structs.WriteRequest{ @@ -142,13 +142,13 @@ func (s 
*HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ // Fall back to do local validation args.Job.Canonicalize() if vErr := args.Job.Validate(); vErr != nil { - if merr, ok := err.(*multierror.Error); ok { - for _, err := range merr.Errors { - out.ValidationErrors = append(out.ValidationErrors, err.Error()) + if merr, ok := vErr.(*multierror.Error); ok { + for _, e := range merr.Errors { + out.ValidationErrors = append(out.ValidationErrors, e.Error()) } + } else { + out.ValidationErrors = append(out.ValidationErrors, vErr.Error()) } - } else { - out.ValidationErrors = append(out.ValidationErrors, vErr.Error()) } } @@ -355,13 +355,13 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ return out, nil } -func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job { +func apiJobToStructJob(job *api.Job) *structs.Job { job.Canonicalize() j := &structs.Job{ Region: *job.Region, ID: *job.ID, - ParentID: *job.ID, + ParentID: *job.ParentID, Name: *job.Name, Type: *job.Type, Priority: *job.Priority, @@ -374,16 +374,14 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job { StatusDescription: *job.StatusDescription, CreateIndex: *job.CreateIndex, ModifyIndex: *job.ModifyIndex, - JobModifyIndex: *job.ModifyIndex, + JobModifyIndex: *job.JobModifyIndex, } j.Constraints = make([]*structs.Constraint, len(job.Constraints)) for i, c := range job.Constraints { - j.Constraints[i] = &structs.Constraint{ - LTarget: c.LTarget, - RTarget: c.RTarget, - Operand: c.Operand, - } + con := &structs.Constraint{} + apiConstraintToStructs(c, con) + j.Constraints[i] = con } if job.Update != nil { j.Update = structs.UpdateStrategy{ @@ -393,10 +391,12 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job { } if job.Periodic != nil { j.Periodic = &structs.PeriodicConfig{ - Enabled: j.Periodic.Enabled, - Spec: j.Periodic.Spec, - SpecType: j.Periodic.SpecType, - ProhibitOverlap: j.Periodic.ProhibitOverlap, + 
Enabled: *job.Periodic.Enabled, + SpecType: *job.Periodic.SpecType, + ProhibitOverlap: *job.Periodic.ProhibitOverlap, + } + if job.Periodic.Spec != nil { + j.Periodic.Spec = *job.Periodic.Spec } } if job.ParameterizedJob != nil { @@ -410,30 +410,28 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job { j.TaskGroups = make([]*structs.TaskGroup, len(job.TaskGroups)) for i, taskGroup := range job.TaskGroups { tg := &structs.TaskGroup{} - s.apiTgToStructsTG(taskGroup, tg) + apiTgToStructsTG(taskGroup, tg) j.TaskGroups[i] = tg } return j } -func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { +func apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { tg.Name = *taskGroup.Name tg.Count = *taskGroup.Count tg.Meta = taskGroup.Meta tg.Constraints = make([]*structs.Constraint, len(taskGroup.Constraints)) for k, constraint := range taskGroup.Constraints { - tg.Constraints[k] = &structs.Constraint{ - LTarget: constraint.LTarget, - RTarget: constraint.RTarget, - Operand: constraint.Operand, - } + c := &structs.Constraint{} + apiConstraintToStructs(constraint, c) + tg.Constraints[k] = c } tg.RestartPolicy = &structs.RestartPolicy{ - Attempts: taskGroup.RestartPolicy.Attempts, - Interval: taskGroup.RestartPolicy.Interval, - Delay: taskGroup.RestartPolicy.Delay, - Mode: taskGroup.RestartPolicy.Mode, + Attempts: *taskGroup.RestartPolicy.Attempts, + Interval: *taskGroup.RestartPolicy.Interval, + Delay: *taskGroup.RestartPolicy.Delay, + Mode: *taskGroup.RestartPolicy.Mode, } tg.EphemeralDisk = &structs.EphemeralDisk{ Sticky: *taskGroup.EphemeralDisk.Sticky, @@ -444,23 +442,21 @@ func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.Task tg.Tasks = make([]*structs.Task, len(taskGroup.Tasks)) for l, task := range taskGroup.Tasks { t := &structs.Task{} - s.apiTaskToStructsTask(task, t) + apiTaskToStructsTask(task, t) tg.Tasks[l] = t } } -func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, 
structsTask *structs.Task) { - structsTask.Name = apiTask.Driver +func apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { + structsTask.Name = apiTask.Name structsTask.Driver = apiTask.Driver structsTask.User = apiTask.User structsTask.Config = apiTask.Config structsTask.Constraints = make([]*structs.Constraint, len(apiTask.Constraints)) for i, constraint := range apiTask.Constraints { - structsTask.Constraints[i] = &structs.Constraint{ - LTarget: constraint.LTarget, - RTarget: constraint.RTarget, - Operand: constraint.Operand, - } + c := &structs.Constraint{} + apiConstraintToStructs(constraint, c) + structsTask.Constraints[i] = c } structsTask.Env = apiTask.Env structsTask.Services = make([]*structs.Service, len(apiTask.Services)) @@ -496,10 +492,10 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *struct structsTask.Resources.Networks[i] = &structs.NetworkResource{ CIDR: nw.CIDR, IP: nw.IP, - MBits: nw.MBits, + MBits: *nw.MBits, } - structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].DynamicPorts)) - structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].ReservedPorts)) + structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(nw.DynamicPorts)) + structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(nw.ReservedPorts)) for j, dp := range nw.DynamicPorts { structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{ Label: dp.Label, @@ -514,7 +510,7 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *struct } } structsTask.Meta = apiTask.Meta - structsTask.KillTimeout = apiTask.KillTimeout + structsTask.KillTimeout = *apiTask.KillTimeout structsTask.LogConfig = &structs.LogConfig{ MaxFiles: *apiTask.LogConfig.MaxFiles, MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB, @@ -553,3 +549,9 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask 
*api.Task, structsTask *struct } } } + +func apiConstraintToStructs(c1 *api.Constraint, c2 *structs.Constraint) { + c2.LTarget = c1.LTarget + c2.RTarget = c1.RTarget + c2.Operand = c1.Operand +} diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index 4f3542a815b8..3c051f1de2eb 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -5,8 +5,11 @@ import ( "net/http/httptest" "reflect" "testing" + "time" "github.com/golang/snappy" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -622,3 +625,355 @@ func TestHTTP_JobDispatch(t *testing.T) { } }) } + +func TestJobs_ApiJobToStructsJob(t *testing.T) { + apiJob := &api.Job{ + Region: helper.StringToPtr("global"), + ID: helper.StringToPtr("foo"), + ParentID: helper.StringToPtr("lol"), + Name: helper.StringToPtr("name"), + Type: helper.StringToPtr("service"), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(true), + Datacenters: []string{"dc1", "dc2"}, + Constraints: []*api.Constraint{ + { + LTarget: "a", + RTarget: "b", + Operand: "c", + }, + }, + Update: &api.UpdateStrategy{ + Stagger: 1 * time.Second, + MaxParallel: 5, + }, + Periodic: &api.PeriodicConfig{ + Enabled: helper.BoolToPtr(true), + Spec: helper.StringToPtr("spec"), + SpecType: helper.StringToPtr("cron"), + ProhibitOverlap: helper.BoolToPtr(true), + }, + ParameterizedJob: &api.ParameterizedJobConfig{ + Payload: "payload", + MetaRequired: []string{"a", "b"}, + MetaOptional: []string{"c", "d"}, + }, + Payload: []byte("payload"), + Meta: map[string]string{ + "foo": "bar", + }, + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("group1"), + Count: helper.IntToPtr(5), + Constraints: []*api.Constraint{ + { + LTarget: "x", + RTarget: "y", + Operand: "z", + }, + }, + RestartPolicy: &api.RestartPolicy{ + Interval: helper.TimeToPtr(1 * time.Second), + Attempts: 
helper.IntToPtr(5), + Delay: helper.TimeToPtr(10 * time.Second), + Mode: helper.StringToPtr("delay"), + }, + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(100), + Sticky: helper.BoolToPtr(true), + Migrate: helper.BoolToPtr(true), + }, + Meta: map[string]string{ + "key": "value", + }, + Tasks: []*api.Task{ + { + Name: "task1", + Driver: "docker", + User: "mary", + Config: map[string]interface{}{ + "lol": "code", + }, + Env: map[string]string{ + "hello": "world", + }, + Constraints: []*api.Constraint{ + { + LTarget: "x", + RTarget: "y", + Operand: "z", + }, + }, + + Services: []api.Service{ + { + Id: "id", + Name: "serviceA", + Tags: []string{"1", "2"}, + PortLabel: "foo", + Checks: []api.ServiceCheck{ + { + Id: "hello", + Name: "bar", + Type: "http", + Command: "foo", + Args: []string{"a", "b"}, + Path: "/check", + Protocol: "http", + PortLabel: "foo", + Interval: 4 * time.Second, + Timeout: 2 * time.Second, + InitialStatus: "ok", + }, + }, + }, + }, + Resources: &api.Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + Networks: []*api.NetworkResource{ + { + IP: "10.10.11.1", + MBits: helper.IntToPtr(10), + ReservedPorts: []api.Port{ + { + Label: "http", + Value: 80, + }, + }, + DynamicPorts: []api.Port{ + { + Label: "ssh", + Value: 2000, + }, + }, + }, + }, + }, + Meta: map[string]string{ + "lol": "code", + }, + KillTimeout: helper.TimeToPtr(10 * time.Second), + LogConfig: &api.LogConfig{ + MaxFiles: helper.IntToPtr(10), + MaxFileSizeMB: helper.IntToPtr(100), + }, + Artifacts: []*api.TaskArtifact{ + { + GetterSource: helper.StringToPtr("source"), + GetterOptions: map[string]string{ + "a": "b", + }, + RelativeDest: helper.StringToPtr("dest"), + }, + }, + Vault: &api.Vault{ + Policies: []string{"a", "b", "c"}, + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("c"), + ChangeSignal: helper.StringToPtr("sighup"), + }, + Templates: []*api.Template{ + { + SourcePath: helper.StringToPtr("source"), + DestPath: 
helper.StringToPtr("dest"), + EmbeddedTmpl: helper.StringToPtr("embedded"), + ChangeMode: helper.StringToPtr("change"), + ChangeSignal: helper.StringToPtr("signal"), + Splay: helper.TimeToPtr(1 * time.Minute), + Perms: helper.StringToPtr("666"), + }, + }, + DispatchPayload: &api.DispatchPayloadConfig{ + File: "fileA", + }, + }, + }, + }, + }, + VaultToken: helper.StringToPtr("token"), + Status: helper.StringToPtr("status"), + StatusDescription: helper.StringToPtr("status_desc"), + CreateIndex: helper.Uint64ToPtr(1), + ModifyIndex: helper.Uint64ToPtr(3), + JobModifyIndex: helper.Uint64ToPtr(5), + } + + expected := &structs.Job{ + Region: "global", + ID: "foo", + ParentID: "lol", + Name: "name", + Type: "service", + Priority: 50, + AllAtOnce: true, + Datacenters: []string{"dc1", "dc2"}, + Constraints: []*structs.Constraint{ + { + LTarget: "a", + RTarget: "b", + Operand: "c", + }, + }, + Update: structs.UpdateStrategy{ + Stagger: 1 * time.Second, + MaxParallel: 5, + }, + Periodic: &structs.PeriodicConfig{ + Enabled: true, + Spec: "spec", + SpecType: "cron", + ProhibitOverlap: true, + }, + ParameterizedJob: &structs.ParameterizedJobConfig{ + Payload: "payload", + MetaRequired: []string{"a", "b"}, + MetaOptional: []string{"c", "d"}, + }, + Payload: []byte("payload"), + Meta: map[string]string{ + "foo": "bar", + }, + TaskGroups: []*structs.TaskGroup{ + { + Name: "group1", + Count: 5, + Constraints: []*structs.Constraint{ + { + LTarget: "x", + RTarget: "y", + Operand: "z", + }, + }, + RestartPolicy: &structs.RestartPolicy{ + Interval: 1 * time.Second, + Attempts: 5, + Delay: 10 * time.Second, + Mode: "delay", + }, + EphemeralDisk: &structs.EphemeralDisk{ + SizeMB: 100, + Sticky: true, + Migrate: true, + }, + Meta: map[string]string{ + "key": "value", + }, + Tasks: []*structs.Task{ + { + Name: "task1", + Driver: "docker", + User: "mary", + Config: map[string]interface{}{ + "lol": "code", + }, + Constraints: []*structs.Constraint{ + { + LTarget: "x", + RTarget: "y", + 
Operand: "z", + }, + }, + Env: map[string]string{ + "hello": "world", + }, + Services: []*structs.Service{ + &structs.Service{ + Name: "serviceA", + Tags: []string{"1", "2"}, + PortLabel: "foo", + Checks: []*structs.ServiceCheck{ + &structs.ServiceCheck{ + Name: "bar", + Type: "http", + Command: "foo", + Args: []string{"a", "b"}, + Path: "/check", + Protocol: "http", + PortLabel: "foo", + Interval: 4 * time.Second, + Timeout: 2 * time.Second, + InitialStatus: "ok", + }, + }, + }, + }, + Resources: &structs.Resources{ + CPU: 100, + MemoryMB: 10, + Networks: []*structs.NetworkResource{ + { + IP: "10.10.11.1", + MBits: 10, + ReservedPorts: []structs.Port{ + { + Label: "http", + Value: 80, + }, + }, + DynamicPorts: []structs.Port{ + { + Label: "ssh", + Value: 2000, + }, + }, + }, + }, + }, + Meta: map[string]string{ + "lol": "code", + }, + KillTimeout: 10 * time.Second, + LogConfig: &structs.LogConfig{ + MaxFiles: 10, + MaxFileSizeMB: 100, + }, + Artifacts: []*structs.TaskArtifact{ + { + GetterSource: "source", + GetterOptions: map[string]string{ + "a": "b", + }, + RelativeDest: "dest", + }, + }, + Vault: &structs.Vault{ + Policies: []string{"a", "b", "c"}, + Env: true, + ChangeMode: "c", + ChangeSignal: "sighup", + }, + Templates: []*structs.Template{ + { + SourcePath: "source", + DestPath: "dest", + EmbeddedTmpl: "embedded", + ChangeMode: "change", + ChangeSignal: "SIGNAL", + Splay: 1 * time.Minute, + Perms: "666", + }, + }, + DispatchPayload: &structs.DispatchPayloadConfig{ + File: "fileA", + }, + }, + }, + }, + }, + + VaultToken: "token", + Status: "status", + StatusDescription: "status_desc", + CreateIndex: 1, + ModifyIndex: 3, + JobModifyIndex: 5, + } + + structsJob := apiJobToStructJob(apiJob) + + if !reflect.DeepEqual(expected, structsJob) { + t.Fatalf("bad %#v", structsJob) + } +} diff --git a/command/helpers_test.go b/command/helpers_test.go index 8d67f123f2f8..fa63d902d5d1 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -11,6 +11,8 @@ 
import ( "testing" "time" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/mitchellh/cli" ) @@ -208,8 +210,8 @@ const ( }` ) -// Test StructJob with local jobfile -func TestStructJobWithLocal(t *testing.T) { +// Test APIJob with local jobfile +func TestJobGetter_LocalFile(t *testing.T) { fh, err := ioutil.TempFile("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -221,19 +223,52 @@ func TestStructJobWithLocal(t *testing.T) { } j := &JobGetter{} - sj, err := j.StructJob(fh.Name()) + aj, err := j.ApiJob(fh.Name()) if err != nil { t.Fatalf("err: %s", err) } - err = sj.Validate() - if err != nil { - t.Fatalf("err: %s", err) + expected := &api.Job{ + ID: helper.StringToPtr("job1"), + Region: helper.StringToPtr("global"), + Priority: helper.IntToPtr(50), + Name: helper.StringToPtr("job1"), + Type: helper.StringToPtr("service"), + Datacenters: []string{"dc1"}, + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("group1"), + Count: helper.IntToPtr(1), + RestartPolicy: &api.RestartPolicy{ + Attempts: helper.IntToPtr(10), + Mode: helper.StringToPtr("delay"), + }, + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + }, + + Tasks: []*api.Task{ + { + Driver: "exec", + Name: "task1", + Resources: &api.Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + }, + LogConfig: api.DefaultLogConfig(), + }, + }, + }, + }, + } + if !reflect.DeepEqual(expected, aj) { + t.Fatalf("bad: %#v", aj) } } // Test StructJob with jobfile from HTTP Server -func TestStructJobWithHTTPServer(t *testing.T) { +func TestAPIJob_HTTPServer(t *testing.T) { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, job) }) @@ -243,13 +278,45 @@ func TestStructJobWithHTTPServer(t *testing.T) { time.Sleep(100 * time.Millisecond) j := &JobGetter{} - sj, err := j.StructJob("http://127.0.0.1:12345/") + aj, err := j.ApiJob("http://127.0.0.1:12345/") if err != nil { 
t.Fatalf("err: %s", err) } - - err = sj.Validate() - if err != nil { - t.Fatalf("err: %s", err) + expected := &api.Job{ + ID: helper.StringToPtr("job1"), + Region: helper.StringToPtr("global"), + Priority: helper.IntToPtr(50), + Name: helper.StringToPtr("job1"), + Type: helper.StringToPtr("service"), + Datacenters: []string{"dc1"}, + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("group1"), + Count: helper.IntToPtr(1), + RestartPolicy: &api.RestartPolicy{ + Attempts: helper.IntToPtr(10), + Mode: helper.StringToPtr("delay"), + }, + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + }, + + Tasks: []*api.Task{ + { + Driver: "exec", + Name: "task1", + Resources: &api.Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + }, + LogConfig: api.DefaultLogConfig(), + }, + }, + }, + }, + } + if !reflect.DeepEqual(expected, aj) { + t.Fatalf("bad: %#v", aj) } } diff --git a/command/util_test.go b/command/util_test.go index ae6b5ab7f242..23b7715a7e50 100644 --- a/command/util_test.go +++ b/command/util_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" ) @@ -44,18 +45,18 @@ func testJob(jobID string) *api.Job { SetConfig("run_for", "5s"). SetConfig("exit_code", 0). Require(&api.Resources{ - MemoryMB: 256, - CPU: 100, + MemoryMB: helper.IntToPtr(256), + CPU: helper.IntToPtr(100), }). SetLogConfig(&api.LogConfig{ - MaxFiles: 1, - MaxFileSizeMB: 2, + MaxFiles: helper.IntToPtr(1), + MaxFileSizeMB: helper.IntToPtr(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: 20, + SizeMB: helper.IntToPtr(20), }) job := api.NewBatchJob(jobID, jobID, "region1", 1). 
diff --git a/command/validate.go b/command/validate.go index 351978abdd8e..fa59ff4242a3 100644 --- a/command/validate.go +++ b/command/validate.go @@ -3,11 +3,14 @@ package command import ( "fmt" "strings" + + "github.com/mitchellh/colorstring" ) type ValidateCommand struct { Meta JobGetter + color *colorstring.Colorize } func (c *ValidateCommand) Help() string { @@ -62,10 +65,22 @@ func (c *ValidateCommand) Run(args []string) int { } // Check that the job is valid - if _, _, err := client.Jobs().Validate(job, nil); err != nil { + jr, _, err := client.Jobs().Validate(job, nil) + if err != nil { c.Ui.Error(fmt.Sprintf("Error validating job: %s", err)) return 1 } + if jr != nil && !jr.DriverConfigValidated { + c.Ui.Output(c.Colorize().Color("[bold][orange]Driver configuration not validated.[reset]")) + } + + if jr != nil && len(jr.ValidationErrors) > 0 { + c.Ui.Output("Job Validation errors:") + for _, err := range jr.ValidationErrors { + c.Ui.Output(err) + } + return 1 + } // Done! c.Ui.Output("Job validation successful") diff --git a/jobspec/parse.go b/jobspec/parse.go index e262acb0cc8a..84b226cdd3eb 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -712,10 +712,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list } // If we have logs then parse that - logConfig := &api.LogConfig{ - MaxFiles: helper.IntToPtr(10), - MaxFileSizeMB: helper.IntToPtr(10), - } + logConfig := api.DefaultLogConfig() if o := listVal.Filter("logs"); len(o.Items) > 0 { if len(o.Items) > 1 { From 79d51814dc783745fdf27358baee2584c78c7890 Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Wed, 15 Feb 2017 14:14:37 -0800 Subject: [PATCH 03/16] Added docs --- website/source/docs/http/validate.html.md | 207 ++++++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 website/source/docs/http/validate.html.md diff --git a/website/source/docs/http/validate.html.md b/website/source/docs/http/validate.html.md new file mode 100644 index 
000000000000..258b893666d6 --- /dev/null +++ b/website/source/docs/http/validate.html.md @@ -0,0 +1,207 @@ +--- +layout: "http" +page_title: "HTTP API: /v1/validate/" +sidebar_current: "docs-http-validate" +description: |- + The '/v1/validate/' endpoints are used for validation of objects. +--- + +# /v1/validate/job + +The `/validate/job` endpoint is used to validate a Nomad job file. The local Nomad +agent forwards the request to a server. In the event a server can't be +reached the agent verifies the job file locally but skips validating driver +configurations. + +## POST + +
+
Description
+
+ Validates a Nomad job file +
+ +
Method
+
POST
+ +
URL
+
`/v1/validate/job`
+ +
Parameters
+
+ None +
+
Body
+
+ + ```javascript +{ + "Job": { + "Region": "global", + "ID": "example", + "ParentID": null, + "Name": "example", + "Type": "service", + "Priority": 50, + "AllAtOnce": null, + "Datacenters": [ + "dc1" + ], + "Constraints": null, + "TaskGroups": [ + { + "Name": "cache", + "Count": 1, + "Constraints": null, + "Tasks": [ + { + "Name": "mongo", + "Driver": "exec", + "User": "", + "Config": { + "args": [ + "-l", + "127.0.0.1", + "0" + ], + "command": "/bin/nc" + }, + "Constraints": null, + "Env": null, + "Services": null, + "Resources": { + "CPU": 1, + "MemoryMB": 10, + "DiskMB": null, + "IOPS": 0, + "Networks": [ + { + "Public": false, + "CIDR": "", + "ReservedPorts": null, + "DynamicPorts": [ + { + "Label": "db111", + "Value": 0 + }, + { + "Label": "http111", + "Value": 0 + } + ], + "IP": "", + "MBits": 10 + } + ] + }, + "Meta": null, + "KillTimeout": null, + "LogConfig": { + "MaxFiles": 10, + "MaxFileSizeMB": 10 + }, + "Artifacts": null, + "Vault": null, + "Templates": null, + "DispatchPayload": null + }, + { + "Name": "redis", + "Driver": "raw_exec", + "User": "", + "Config": { + "args": [ + "-l", + "127.0.0.1", + "0" + ], + "command": "/usr/bin/nc" + }, + "Constraints": null, + "Env": null, + "Services": null, + "Resources": { + "CPU": 1, + "MemoryMB": 10, + "DiskMB": null, + "IOPS": 0, + "Networks": [ + { + "Public": false, + "CIDR": "", + "ReservedPorts": null, + "DynamicPorts": [ + { + "Label": "db", + "Value": 0 + }, + { + "Label": "http", + "Value": 0 + } + ], + "IP": "", + "MBits": 10 + } + ] + }, + "Meta": null, + "KillTimeout": null, + "LogConfig": { + "MaxFiles": 10, + "MaxFileSizeMB": 10 + }, + "Artifacts": null, + "Vault": null, + "Templates": null, + "DispatchPayload": null + } + ], + "RestartPolicy": { + "Interval": 300000000000, + "Attempts": 10, + "Delay": 25000000000, + "Mode": "delay" + }, + "EphemeralDisk": { + "Sticky": null, + "Migrate": null, + "SizeMB": 300 + }, + "Meta": null + } + ], + "Update": { + "Stagger": 10000000000, + "MaxParallel": 0 
+ }, + "Periodic": null, + "ParameterizedJob": null, + "Payload": null, + "Meta": null, + "VaultToken": null, + "Status": null, + "StatusDescription": null, + "CreateIndex": null, + "ModifyIndex": null, + "JobModifyIndex": null + } +} + ``` + +
+ + +
Returns
+
+ + ```javascript + { + "DriverConfigValidated": true, + "ValidationErrors": [ + "minimum CPU value is 20; got 1" + ] + } + ``` +
+
From b56764a0ec3cdf3b97dc29e0500eec3d73772ef4 Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Wed, 15 Feb 2017 16:49:01 -0800 Subject: [PATCH 04/16] Fixed the nomad status command --- command/status.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/command/status.go b/command/status.go index 35a6e84f83ca..c91b773e6c9f 100644 --- a/command/status.go +++ b/command/status.go @@ -144,12 +144,12 @@ func (c *StatusCommand) Run(args []string) int { // Format the job info basic := []string{ - fmt.Sprintf("ID|%s", job.ID), - fmt.Sprintf("Name|%s", job.Name), - fmt.Sprintf("Type|%s", job.Type), - fmt.Sprintf("Priority|%d", job.Priority), + fmt.Sprintf("ID|%s", *job.ID), + fmt.Sprintf("Name|%s", *job.Name), + fmt.Sprintf("Type|%s", *job.Type), + fmt.Sprintf("Priority|%d", *job.Priority), fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")), - fmt.Sprintf("Status|%s", job.Status), + fmt.Sprintf("Status|%s", *job.Status), fmt.Sprintf("Periodic|%v", periodic), fmt.Sprintf("Parameterized|%v", parameterized), } From 67e6098f87b88ab0f26ff65ef2dd4135fc85fb3a Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Wed, 15 Feb 2017 16:56:36 -0800 Subject: [PATCH 05/16] Removing un-neccessary conversions --- api/jobs.go | 12 ++++++++++++ command/status.go | 39 +++++---------------------------------- 2 files changed, 17 insertions(+), 34 deletions(-) diff --git a/api/jobs.go b/api/jobs.go index 902bca53d198..ed67f7f175be 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -494,6 +494,18 @@ type JobValidateResponse struct { ValidationErrors []string } +// JobUpdateRequest is used to update a job +type JobUpdateRequest struct { + Job *Job + // If EnforceIndex is set then the job will only be registered if the passed + // JobModifyIndex matches the current Jobs index. If the index is zero, the + // register only occurs if the job is new. 
+ EnforceIndex bool + JobModifyIndex uint64 + + WriteRequest +} + // RegisterJobRequest is used to serialize a job registration type RegisterJobRequest struct { Job *Job diff --git a/command/status.go b/command/status.go index c91b773e6c9f..55569aba3abb 100644 --- a/command/status.go +++ b/command/status.go @@ -1,8 +1,6 @@ package command import ( - "bytes" - "encoding/gob" "fmt" "sort" "strings" @@ -133,14 +131,8 @@ func (c *StatusCommand) Run(args []string) int { return 1 } - // Check if it is periodic or a parameterized job - sJob, err := convertApiJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 1 - } - periodic := sJob.IsPeriodic() - parameterized := sJob.IsParameterized() + periodic := job.IsPeriodic() + parameterized := job.IsParameterized() // Format the job info basic := []string{ @@ -156,7 +148,7 @@ func (c *StatusCommand) Run(args []string) int { if periodic { now := time.Now().UTC() - next := sJob.Periodic.Next(now) + next := job.Periodic.Next(now) basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s", fmt.Sprintf("%s (%s from now)", formatTime(next), formatTimeDifference(now, next, time.Second)))) @@ -375,13 +367,8 @@ func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error return nil } - sJob, err := convertApiJob(job) - if err != nil { - return fmt.Errorf("Error converting job: %s", err) - } - - periodic := sJob.IsPeriodic() - parameterizedJob := sJob.IsParameterized() + periodic := job.IsPeriodic() + parameterizedJob := job.IsParameterized() // Print the summary if !periodic && !parameterizedJob { @@ -449,22 +436,6 @@ func (c *StatusCommand) outputFailedPlacements(failedEval *api.Evaluation) { } } -// convertApiJob is used to take a *api.Job and convert it to an *struct.Job. -// This function is just a hammer and probably needs to be revisited. 
-func convertApiJob(in *api.Job) (*structs.Job, error) { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) - var structJob *structs.Job - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(in); err != nil { - return nil, err - } - if err := gob.NewDecoder(buf).Decode(&structJob); err != nil { - return nil, err - } - return structJob, nil -} - // list general information about a list of jobs func createStatusListOutput(jobs []*api.JobListStub) string { out := make([]string, len(jobs)+1) From 7eac718eea0de02bb3b2f53b917f797255607fa5 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Wed, 15 Feb 2017 17:04:53 -0800 Subject: [PATCH 06/16] Use RC to see if cause of failure --- .travis.yml | 2 +- command/helpers_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 9a7d747a6a90..dfbc94a96592 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ services: language: go go: - - 1.7 + - 1.8rc3 branches: only: diff --git a/command/helpers_test.go b/command/helpers_test.go index fa63d902d5d1..c2b3bc6fb258 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -268,7 +268,7 @@ func TestJobGetter_LocalFile(t *testing.T) { } // Test StructJob with jobfile from HTTP Server -func TestAPIJob_HTTPServer(t *testing.T) { +func TestJobGetter_HTTPServer(t *testing.T) { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, job) }) From 592bc73a4c284922de2ba192a7a169f7e44ca77f Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Wed, 15 Feb 2017 17:40:51 -0800 Subject: [PATCH 07/16] Making the job register endpoint use api.Job --- api/jobs.go | 3 ++- command/agent/job_endpoint.go | 39 ++++++++++++++++++++++++++++------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/api/jobs.go b/api/jobs.go index ed67f7f175be..aa4871f1191f 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -495,7 +495,7 @@ type JobValidateResponse struct { } // 
JobUpdateRequest is used to update a job -type JobUpdateRequest struct { +type JobRegisterRequest struct { Job *Job // If EnforceIndex is set then the job will only be registered if the passed // JobModifyIndex matches the current Jobs index. If the index is zero, the @@ -526,6 +526,7 @@ type deregisterJobResponse struct { type JobPlanRequest struct { Job *Job Diff bool + WriteRequest } type JobPlanResponse struct { diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index b26852c64296..dc295c74ce56 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -93,20 +93,31 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request, return nil, CodedError(405, ErrInvalidMethod) } - var args structs.JobPlanRequest + var args api.JobPlanRequest if err := decodeBody(req, &args); err != nil { return nil, CodedError(400, err.Error()) } if args.Job == nil { return nil, CodedError(400, "Job must be specified") } - if jobName != "" && args.Job.ID != jobName { + if args.Job.ID == nil { + return nil, CodedError(400, "Job must have a valid ID") + } + if jobName != "" && *args.Job.ID != jobName { return nil, CodedError(400, "Job ID does not match") } s.parseRegion(req, &args.Region) + sJob := apiJobToStructJob(args.Job) + planReq := structs.JobPlanRequest{ + Job: sJob, + Diff: args.Diff, + WriteRequest: structs.WriteRequest{ + Region: args.WriteRequest.Region, + }, + } var out structs.JobPlanResponse - if err := s.agent.RPC("Job.Plan", &args, &out); err != nil { + if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil { return nil, err } setIndex(resp, out.Index) @@ -274,20 +285,34 @@ func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request, func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request, jobName string) (interface{}, error) { - var args structs.JobRegisterRequest + var args api.JobRegisterRequest if err := decodeBody(req, &args); err != nil { return nil, 
CodedError(400, err.Error()) } if args.Job == nil { return nil, CodedError(400, "Job must be specified") } - if jobName != "" && args.Job.ID != jobName { - return nil, CodedError(400, "Job ID does not match") + + if args.Job.ID == nil { + return nil, CodedError(400, "Job ID hasn't been provided") + } + if jobName != "" && *args.Job.ID != jobName { + return nil, CodedError(400, "Job ID does not match name") } s.parseRegion(req, &args.Region) + sJob := apiJobToStructJob(args.Job) + + regReq := structs.JobRegisterRequest{ + Job: sJob, + EnforceIndex: args.EnforceIndex, + JobModifyIndex: args.JobModifyIndex, + WriteRequest: structs.WriteRequest{ + Region: args.WriteRequest.Region, + }, + } var out structs.JobRegisterResponse - if err := s.agent.RPC("Job.Register", &args, &out); err != nil { + if err := s.agent.RPC("Job.Register", ®Req, &out); err != nil { return nil, err } setIndex(resp, out.Index) From ea79a1475fb63d054a57cfeaeb891f1532edc05c Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 16 Feb 2017 13:10:24 -0800 Subject: [PATCH 08/16] Fix detection of missing port --- command/agent/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/agent/config.go b/command/agent/config.go index dc90c740fdc3..9efac4b4ddd0 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -809,7 +809,7 @@ func normalizeAdvertise(addr string, bind string, defport int, dev bool) (string func isMissingPort(err error) bool { // matches error const in net/ipsock.go const missingPort = "missing port in address" - return err != nil && strings.HasPrefix(err.Error(), missingPort) + return err != nil && strings.Contains(err.Error(), missingPort) } // Merge is used to merge two server configs together From b6e0d12de7753b11b4e2e0ac1c0d99660aabd28e Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 16 Feb 2017 13:13:29 -0800 Subject: [PATCH 09/16] Flatmap --- command/helpers_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff 
--git a/command/helpers_test.go b/command/helpers_test.go index c2b3bc6fb258..5ede0aaacfef 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/flatmap" "github.com/mitchellh/cli" ) @@ -263,7 +264,9 @@ func TestJobGetter_LocalFile(t *testing.T) { }, } if !reflect.DeepEqual(expected, aj) { - t.Fatalf("bad: %#v", aj) + eflat := flatmap.Flatten(expected, nil, false) + aflat := flatmap.Flatten(aj, nil, false) + t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } } @@ -317,6 +320,8 @@ func TestJobGetter_HTTPServer(t *testing.T) { }, } if !reflect.DeepEqual(expected, aj) { - t.Fatalf("bad: %#v", aj) + eflat := flatmap.Flatten(expected, nil, false) + aflat := flatmap.Flatten(aj, nil, false) + t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } } From 6debb3e03bc6f5a6f6b1f77dd98e204a1dbd33da Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 16 Feb 2017 14:00:41 -0800 Subject: [PATCH 10/16] Refactor test and fix them --- command/helpers_test.go | 84 +++++++++++++---------------------------- 1 file changed, 27 insertions(+), 57 deletions(-) diff --git a/command/helpers_test.go b/command/helpers_test.go index 5ede0aaacfef..a430c91a7be2 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -211,25 +211,8 @@ const ( }` ) -// Test APIJob with local jobfile -func TestJobGetter_LocalFile(t *testing.T) { - fh, err := ioutil.TempFile("", "nomad") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.Remove(fh.Name()) - _, err = fh.WriteString(job) - if err != nil { - t.Fatalf("err: %s", err) - } - - j := &JobGetter{} - aj, err := j.ApiJob(fh.Name()) - if err != nil { - t.Fatalf("err: %s", err) - } - - expected := &api.Job{ +var ( + expectedApiJob = &api.Job{ ID: helper.StringToPtr("job1"), Region: helper.StringToPtr("global"), Priority: helper.IntToPtr(50), @@ -242,6 +225,7 @@ func TestJobGetter_LocalFile(t *testing.T) { 
Count: helper.IntToPtr(1), RestartPolicy: &api.RestartPolicy{ Attempts: helper.IntToPtr(10), + Interval: helper.TimeToPtr(15 * time.Second), Mode: helper.StringToPtr("delay"), }, EphemeralDisk: &api.EphemeralDisk{ @@ -263,8 +247,28 @@ func TestJobGetter_LocalFile(t *testing.T) { }, }, } - if !reflect.DeepEqual(expected, aj) { - eflat := flatmap.Flatten(expected, nil, false) +) + +// Test APIJob with local jobfile +func TestJobGetter_LocalFile(t *testing.T) { + fh, err := ioutil.TempFile("", "nomad") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.Remove(fh.Name()) + _, err = fh.WriteString(job) + if err != nil { + t.Fatalf("err: %s", err) + } + + j := &JobGetter{} + aj, err := j.ApiJob(fh.Name()) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(expectedApiJob, aj) { + eflat := flatmap.Flatten(expectedApiJob, nil, false) aflat := flatmap.Flatten(aj, nil, false) t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } @@ -285,42 +289,8 @@ func TestJobGetter_HTTPServer(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - expected := &api.Job{ - ID: helper.StringToPtr("job1"), - Region: helper.StringToPtr("global"), - Priority: helper.IntToPtr(50), - Name: helper.StringToPtr("job1"), - Type: helper.StringToPtr("service"), - Datacenters: []string{"dc1"}, - TaskGroups: []*api.TaskGroup{ - { - Name: helper.StringToPtr("group1"), - Count: helper.IntToPtr(1), - RestartPolicy: &api.RestartPolicy{ - Attempts: helper.IntToPtr(10), - Mode: helper.StringToPtr("delay"), - }, - EphemeralDisk: &api.EphemeralDisk{ - SizeMB: helper.IntToPtr(300), - }, - - Tasks: []*api.Task{ - { - Driver: "exec", - Name: "task1", - Resources: &api.Resources{ - CPU: helper.IntToPtr(100), - MemoryMB: helper.IntToPtr(10), - IOPS: helper.IntToPtr(0), - }, - LogConfig: api.DefaultLogConfig(), - }, - }, - }, - }, - } - if !reflect.DeepEqual(expected, aj) { - eflat := flatmap.Flatten(expected, nil, false) + if !reflect.DeepEqual(expectedApiJob, aj) { + eflat := 
flatmap.Flatten(expectedApiJob, nil, false) aflat := flatmap.Flatten(aj, nil, false) t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } From ef00766e31454945aa60330560fd6a07d9bb7304 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Thu, 16 Feb 2017 14:29:34 -0800 Subject: [PATCH 11/16] Fix CLI handling of parameterized/periodic jobs --- command/status.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/command/status.go b/command/status.go index 55569aba3abb..bd8569f50550 100644 --- a/command/status.go +++ b/command/status.go @@ -191,7 +191,7 @@ func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) err } // Generate the prefix that matches launched jobs from the periodic job. - prefix := fmt.Sprintf("%s%s", job.ID, structs.PeriodicLaunchSuffix) + prefix := fmt.Sprintf("%s%s", *job.ID, structs.PeriodicLaunchSuffix) children, _, err := client.Jobs().PrefixList(prefix) if err != nil { return fmt.Errorf("Error querying job: %s", err) @@ -237,8 +237,8 @@ func (c *StatusCommand) outputParameterizedInfo(client *api.Client, job *api.Job return err } - // Generate the prefix that matches launched jobs from the periodic job. - prefix := fmt.Sprintf("%s%s", job.ID, structs.DispatchLaunchSuffix) + // Generate the prefix that matches launched jobs from the parameterized job. 
+ prefix := fmt.Sprintf("%s%s", *job.ID, structs.DispatchLaunchSuffix) children, _, err := client.Jobs().PrefixList(prefix) if err != nil { return fmt.Errorf("Error querying job: %s", err) From 8557a89d38a0dcd4be475193b27a94fef1b5702b Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Thu, 16 Feb 2017 15:18:38 -0800 Subject: [PATCH 12/16] Fixed cli tests --- command/plan_test.go | 9 ++++++++- command/run_test.go | 9 ++++++++- command/validate_test.go | 17 +++++++++++++++++ 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/command/plan_test.go b/command/plan_test.go index 3bd7511ba28e..f8aed7e5b365 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -17,6 +19,11 @@ func TestPlanCommand_Fails(t *testing.T) { ui := new(cli.MockUi) cmd := &PlanCommand{Meta: Meta{Ui: ui}} + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + // Fails on misuse if code := cmd.Run([]string{"some", "bad", "args"}); code != 255 { t.Fatalf("expected exit code 1, got: %d", code) @@ -64,7 +71,7 @@ func TestPlanCommand_Fails(t *testing.T) { if code := cmd.Run([]string{fh2.Name()}); code != 255 { t.Fatalf("expect exit 255, got: %d", code) } - if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error validating") { + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error during plan") { t.Fatalf("expect validation error, got: %s", out) } ui.ErrorWriter.Reset() diff --git a/command/run_test.go b/command/run_test.go index f8a5212beece..a8e5e7a6db58 100644 --- a/command/run_test.go +++ b/command/run_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -52,6 +54,11 @@ func TestRunCommand_Fails(t 
*testing.T) { ui := new(cli.MockUi) cmd := &RunCommand{Meta: Meta{Ui: ui}} + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + // Fails on misuse if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 { t.Fatalf("expected exit code 1, got: %d", code) @@ -99,7 +106,7 @@ func TestRunCommand_Fails(t *testing.T) { if code := cmd.Run([]string{fh2.Name()}); code != 1 { t.Fatalf("expect exit 1, got: %d", code) } - if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error validating") { + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error submitting job") { t.Fatalf("expect validation error, got: %s", out) } ui.ErrorWriter.Reset() diff --git a/command/validate_test.go b/command/validate_test.go index 11be97511468..c4f4069caba5 100644 --- a/command/validate_test.go +++ b/command/validate_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -17,6 +19,11 @@ func TestValidateCommand(t *testing.T) { ui := new(cli.MockUi) cmd := &ValidateCommand{Meta: Meta{Ui: ui}} + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + fh, err := ioutil.TempFile("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -30,6 +37,9 @@ job "job1" { count = 1 task "task1" { driver = "exec" + config { + command = "/bin/sleep" + } resources = { cpu = 1000 memory = 512 @@ -113,6 +123,10 @@ func TestValidateCommand_From_STDIN(t *testing.T) { Meta: Meta{Ui: ui}, JobGetter: JobGetter{testStdin: stdinR}, } + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) go func() { stdinW.WriteString(` @@ -123,6 +137,9 @@ job "job1" { count = 1 task "task1" { driver = "exec" + config { + command = "/bin/echo" + } 
resources = { cpu = 1000 memory = 512 From 00f8237cdfc98445c93bd8710a03125ea5b1452c Mon Sep 17 00:00:00 2001 From: Diptanu Choudhury Date: Thu, 16 Feb 2017 15:45:13 -0800 Subject: [PATCH 13/16] Downgrading to Go 1.7 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index dfbc94a96592..480437cd5d18 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ services: language: go go: - - 1.8rc3 + - 1.7 branches: only: From 0ba19ff3a224d6402544076c1704f198d3d80ea7 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 20 Feb 2017 15:14:44 -0800 Subject: [PATCH 14/16] Bump --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 480437cd5d18..d89e20c07f52 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ services: language: go go: - - 1.7 + - 1.8 branches: only: From 503746c7c41dcde9c5fa96acf0974065bbd0b6a5 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 20 Feb 2017 15:53:56 -0800 Subject: [PATCH 15/16] Add defaulting test and fix types on the agent job endpoint tests --- api/jobs_testing.go | 110 +++++++++++++++++++++ command/agent/job_endpoint_test.go | 147 ++++++++++++++++++++--------- 2 files changed, 212 insertions(+), 45 deletions(-) create mode 100644 api/jobs_testing.go diff --git a/api/jobs_testing.go b/api/jobs_testing.go new file mode 100644 index 000000000000..c27de7d38cc6 --- /dev/null +++ b/api/jobs_testing.go @@ -0,0 +1,110 @@ +package api + +import ( + "time" + + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" +) + +func MockJob() *Job { + job := &Job{ + Region: helper.StringToPtr("global"), + ID: helper.StringToPtr(structs.GenerateUUID()), + Name: helper.StringToPtr("my-job"), + Type: helper.StringToPtr("service"), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(false), + Datacenters: []string{"dc1"}, + Constraints: []*Constraint{ + &Constraint{ + LTarget: "${attr.kernel.name}", 
+ RTarget: "linux", + Operand: "=", + }, + }, + TaskGroups: []*TaskGroup{ + &TaskGroup{ + Name: helper.StringToPtr("web"), + Count: helper.IntToPtr(10), + EphemeralDisk: &EphemeralDisk{ + SizeMB: helper.IntToPtr(150), + }, + RestartPolicy: &RestartPolicy{ + Attempts: helper.IntToPtr(3), + Interval: helper.TimeToPtr(10 * time.Minute), + Delay: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + }, + Tasks: []*Task{ + &Task{ + Name: "web", + Driver: "exec", + Config: map[string]interface{}{ + "command": "/bin/date", + }, + Env: map[string]string{ + "FOO": "bar", + }, + Services: []Service{ + { + Name: "${TASK}-frontend", + PortLabel: "http", + Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, + Checks: []ServiceCheck{ + { + Name: "check-table", + Type: "script", + Command: "/usr/local/check-table-${meta.database}", + Args: []string{"${meta.version}"}, + Interval: 30 * time.Second, + Timeout: 5 * time.Second, + }, + }, + }, + { + Name: "${TASK}-admin", + PortLabel: "admin", + }, + }, + LogConfig: DefaultLogConfig(), + Resources: &Resources{ + CPU: helper.IntToPtr(500), + MemoryMB: helper.IntToPtr(256), + Networks: []*NetworkResource{ + &NetworkResource{ + MBits: helper.IntToPtr(50), + DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + }, + }, + }, + Meta: map[string]string{ + "elb_check_type": "http", + "elb_check_interval": "30s", + "elb_check_min": "3", + }, + }, + }, + Meta: map[string]string{ + "owner": "armon", + }, + } + job.Canonicalize() + return job +} + +func MockPeriodicJob() *Job { + j := MockJob() + j.Type = helper.StringToPtr("batch") + j.Periodic = &PeriodicConfig{ + Enabled: helper.BoolToPtr(true), + SpecType: helper.StringToPtr("cron"), + Spec: helper.StringToPtr("*/30 * * * *"), + } + return j +} diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index 3c051f1de2eb..0941988cec86 100644 --- 
a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -18,10 +18,10 @@ func TestHTTP_JobsList(t *testing.T) { httpTest(t, nil, func(s *TestServer) { for i := 0; i < 3; i++ { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -70,12 +70,12 @@ func TestHTTP_PrefixJobsList(t *testing.T) { httpTest(t, nil, func(s *TestServer) { for i := 0; i < 3; i++ { // Create the job - job := mock.Job() - job.ID = ids[i] - job.TaskGroups[0].Count = 1 - args := structs.JobRegisterRequest{ + job := api.MockJob() + job.ID = &ids[i] + *job.TaskGroups[0].Count = 1 + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -118,10 +118,10 @@ func TestHTTP_PrefixJobsList(t *testing.T) { func TestHTTP_JobsRegister(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } buf := encodeReq(args) @@ -151,7 +151,7 @@ func TestHTTP_JobsRegister(t *testing.T) { // Check the job is registered getReq := structs.JobSpecificRequest{ - JobID: job.ID, + JobID: *job.ID, QueryOptions: structs.QueryOptions{Region: "global"}, } var getResp structs.SingleJobResponse @@ -165,13 +165,70 @@ func TestHTTP_JobsRegister(t *testing.T) { }) } +func TestHTTP_JobsRegister_Defaulting(t *testing.T) { + httpTest(t, nil, func(s *TestServer) { + // 
Create the job + job := api.MockJob() + + // Do not set its priority + job.Priority = nil + + args := api.JobRegisterRequest{ + Job: job, + WriteRequest: api.WriteRequest{Region: "global"}, + } + buf := encodeReq(args) + + // Make the HTTP request + req, err := http.NewRequest("PUT", "/v1/jobs", buf) + if err != nil { + t.Fatalf("err: %v", err) + } + respW := httptest.NewRecorder() + + // Make the request + obj, err := s.Server.JobsRequest(respW, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check the response + dereg := obj.(structs.JobRegisterResponse) + if dereg.EvalID == "" { + t.Fatalf("bad: %v", dereg) + } + + // Check for the index + if respW.HeaderMap.Get("X-Nomad-Index") == "" { + t.Fatalf("missing index") + } + + // Check the job is registered + getReq := structs.JobSpecificRequest{ + JobID: *job.ID, + QueryOptions: structs.QueryOptions{Region: "global"}, + } + var getResp structs.SingleJobResponse + if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { + t.Fatalf("err: %v", err) + } + + if getResp.Job == nil { + t.Fatalf("job does not exist") + } + if getResp.Job.Priority != 50 { + t.Fatalf("job didn't get defaulted") + } + }) +} + func TestHTTP_JobQuery(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -179,7 +236,7 @@ func TestHTTP_JobQuery(t *testing.T) { } // Make the HTTP request - req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) + req, err := http.NewRequest("GET", "/v1/job/"+*job.ID, nil) if err != nil { t.Fatalf("err: %v", err) } @@ -204,7 +261,7 @@ func TestHTTP_JobQuery(t *testing.T) { // Check the job j := obj.(*structs.Job) - if j.ID != job.ID { 
+ if j.ID != *job.ID { t.Fatalf("bad: %#v", j) } }) @@ -266,15 +323,15 @@ func TestHTTP_JobQuery_Payload(t *testing.T) { func TestHTTP_JobUpdate(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } buf := encodeReq(args) // Make the HTTP request - req, err := http.NewRequest("PUT", "/v1/job/"+job.ID, buf) + req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf) if err != nil { t.Fatalf("err: %v", err) } @@ -299,7 +356,7 @@ func TestHTTP_JobUpdate(t *testing.T) { // Check the job is registered getReq := structs.JobSpecificRequest{ - JobID: job.ID, + JobID: *job.ID, QueryOptions: structs.QueryOptions{Region: "global"}, } var getResp structs.SingleJobResponse @@ -316,10 +373,10 @@ func TestHTTP_JobUpdate(t *testing.T) { func TestHTTP_JobDelete(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -327,7 +384,7 @@ func TestHTTP_JobDelete(t *testing.T) { } // Make the HTTP request - req, err := http.NewRequest("DELETE", "/v1/job/"+job.ID, nil) + req, err := http.NewRequest("DELETE", "/v1/job/"+*job.ID, nil) if err != nil { t.Fatalf("err: %v", err) } @@ -352,7 +409,7 @@ func TestHTTP_JobDelete(t *testing.T) { // Check the job is gone getReq := structs.JobSpecificRequest{ - JobID: job.ID, + JobID: *job.ID, QueryOptions: structs.QueryOptions{Region: "global"}, } var getResp structs.SingleJobResponse @@ -368,10 +425,10 @@ func TestHTTP_JobDelete(t *testing.T) { func 
TestHTTP_JobForceEvaluate(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -379,7 +436,7 @@ func TestHTTP_JobForceEvaluate(t *testing.T) { } // Make the HTTP request - req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", nil) + req, err := http.NewRequest("POST", "/v1/job/"+*job.ID+"/evaluate", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -407,10 +464,10 @@ func TestHTTP_JobForceEvaluate(t *testing.T) { func TestHTTP_JobEvaluations(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job - job := mock.Job() - args := structs.JobRegisterRequest{ + job := api.MockJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -418,7 +475,7 @@ func TestHTTP_JobEvaluations(t *testing.T) { } // Make the HTTP request - req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/evaluations", nil) + req, err := http.NewRequest("GET", "/v1/job/"+*job.ID+"/evaluations", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -506,10 +563,10 @@ func TestHTTP_JobAllocations(t *testing.T) { func TestHTTP_PeriodicForce(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create and register a periodic job. 
- job := mock.PeriodicJob() - args := structs.JobRegisterRequest{ + job := api.MockPeriodicJob() + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -517,7 +574,7 @@ func TestHTTP_PeriodicForce(t *testing.T) { } // Make the HTTP request - req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/periodic/force", nil) + req, err := http.NewRequest("POST", "/v1/job/"+*job.ID+"/periodic/force", nil) if err != nil { t.Fatalf("err: %v", err) } @@ -581,13 +638,13 @@ func TestHTTP_JobPlan(t *testing.T) { func TestHTTP_JobDispatch(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the parameterized job - job := mock.Job() - job.Type = structs.JobTypeBatch - job.ParameterizedJob = &structs.ParameterizedJobConfig{} + job := api.MockJob() + job.Type = helper.StringToPtr("batch") + job.ParameterizedJob = &api.ParameterizedJobConfig{} - args := structs.JobRegisterRequest{ + args := api.JobRegisterRequest{ Job: job, - WriteRequest: structs.WriteRequest{Region: "global"}, + WriteRequest: api.WriteRequest{Region: "global"}, } var resp structs.JobRegisterResponse if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { @@ -602,7 +659,7 @@ func TestHTTP_JobDispatch(t *testing.T) { buf := encodeReq(args2) // Make the HTTP request - req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf) + req2, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/dispatch", buf) if err != nil { t.Fatalf("err: %v", err) } From 7cef0d7a6b25600d702ebf1e1eb00fc75b521fb5 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 20 Feb 2017 16:22:31 -0800 Subject: [PATCH 16/16] fix pointer --- command/alloc_status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/alloc_status.go b/command/alloc_status.go index 130292ea2b07..27b7cd0c4044 100644 
--- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -426,7 +426,7 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str cpuUsage, memUsage, humanize.IBytes(uint64(*resource.DiskMB*bytesPerMegabyte)), - resource.IOPS, + *resource.IOPS, firstAddr)) for i := 1; i < len(addr); i++ { resourcesOutput = append(resourcesOutput, fmt.Sprintf("||||%v", addr[i]))