diff --git a/client/alloc_runner.go b/client/alloc_runner.go
index f1c33fb72375..c8fa460cb60a 100644
--- a/client/alloc_runner.go
+++ b/client/alloc_runner.go
@@ -31,16 +31,15 @@ type AllocRunner struct {
 	logger        *log.Logger
 	consulService *ConsulService
 
-	alloc     *structs.Allocation
-	allocLock sync.Mutex
-
-	// Explicit status of allocation. Set when there are failures
-	allocClientStatus string
+	alloc                  *structs.Allocation
+	allocClientStatus      string // Explicit status of allocation. Set when there are failures
 	allocClientDescription string
+	allocLock              sync.Mutex
 
 	dirtyCh chan struct{}
 
 	ctx        *driver.ExecContext
+	ctxLock    sync.Mutex
 	tasks      map[string]*TaskRunner
 	taskStates map[string]*structs.TaskState
 	restored   map[string]struct{}
@@ -76,7 +75,7 @@ func NewAllocRunner(logger *log.Logger, config *config.Config, updater AllocStat
 		consulService: consulService,
 		dirtyCh:       make(chan struct{}, 1),
 		tasks:         make(map[string]*TaskRunner),
-		taskStates:    alloc.TaskStates,
+		taskStates:    copyTaskStates(alloc.TaskStates),
 		restored:      make(map[string]struct{}),
 		updateCh:      make(chan *structs.Allocation, 8),
 		destroyCh:     make(chan struct{}),
@@ -87,7 +86,10 @@
 
 // stateFilePath returns the path to our state file
 func (r *AllocRunner) stateFilePath() string {
-	return filepath.Join(r.config.StateDir, "alloc", r.alloc.ID, "state.json")
+	r.allocLock.Lock()
+	defer r.allocLock.Unlock()
+	path := filepath.Join(r.config.StateDir, "alloc", r.alloc.ID, "state.json")
+	return path
 }
 
 // RestoreState is used to restore the state of the alloc runner
@@ -112,7 +114,7 @@ func (r *AllocRunner) RestoreState() error {
 		r.restored[name] = struct{}{}
 
 		task := &structs.Task{Name: name}
-		tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.alloc,
+		tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.Alloc(),
 			task, r.consulService)
 		r.tasks[name] = tr
 
@@ -153,16 +155,27 @@
 }
 
 func (r *AllocRunner) saveAllocRunnerState() error {
+	// Create the snapshot.
 	r.taskStatusLock.RLock()
-	defer r.taskStatusLock.RUnlock()
+	states := copyTaskStates(r.taskStates)
+	r.taskStatusLock.RUnlock()
+
+	alloc := r.Alloc()
 	r.allocLock.Lock()
-	defer r.allocLock.Unlock()
+	allocClientStatus := r.allocClientStatus
+	allocClientDescription := r.allocClientDescription
+	r.allocLock.Unlock()
+
+	r.ctxLock.Lock()
+	ctx := r.ctx
+	r.ctxLock.Unlock()
+
 	snap := allocRunnerState{
-		Alloc:                  r.alloc,
-		Context:                r.ctx,
-		AllocClientStatus:      r.allocClientStatus,
-		AllocClientDescription: r.allocClientDescription,
-		TaskStates:             r.taskStates,
+		Alloc:                  alloc,
+		Context:                ctx,
+		AllocClientStatus:      allocClientStatus,
+		AllocClientDescription: allocClientDescription,
+		TaskStates:             states,
 	}
 	return persistState(r.stateFilePath(), &snap)
 }
@@ -186,16 +199,33 @@ func (r *AllocRunner) DestroyContext() error {
 	return r.ctx.AllocDir.Destroy()
 }
 
+// copyTaskStates returns a copy of the passed task states.
+func copyTaskStates(states map[string]*structs.TaskState) map[string]*structs.TaskState {
+	copy := make(map[string]*structs.TaskState, len(states))
+	for task, state := range states {
+		copy[task] = state.Copy()
+	}
+	return copy
+}
+
 // Alloc returns the associated allocation
 func (r *AllocRunner) Alloc() *structs.Allocation {
 	r.allocLock.Lock()
 	alloc := r.alloc.Copy()
+
+	// The status has explicitly been set.
+	if r.allocClientStatus != "" || r.allocClientDescription != "" {
+		alloc.ClientStatus = r.allocClientStatus
+		alloc.ClientDescription = r.allocClientDescription
+		r.allocLock.Unlock()
+		return alloc
+	}
 	r.allocLock.Unlock()
 
 	// Scan the task states to determine the status of the alloc
 	var pending, running, dead, failed bool
 	r.taskStatusLock.RLock()
-	alloc.TaskStates = r.taskStates
+	alloc.TaskStates = copyTaskStates(r.taskStates)
 	for _, state := range r.taskStates {
 		switch state.State {
 		case structs.TaskStateRunning:
@@ -213,13 +243,6 @@ func (r *AllocRunner) Alloc() *structs.Allocation {
 	}
 	r.taskStatusLock.RUnlock()
 
-	// The status has explicitely been set.
-	if r.allocClientStatus != "" || r.allocClientDescription != "" {
-		alloc.ClientStatus = r.allocClientStatus
-		alloc.ClientDescription = r.allocClientDescription
-		return alloc
-	}
-
 	// Determine the alloc status
 	if failed {
 		alloc.ClientStatus = structs.AllocClientStatusFailed
@@ -276,8 +299,10 @@ func (r *AllocRunner) syncStatus() error {
 
 // setStatus is used to update the allocation status
 func (r *AllocRunner) setStatus(status, desc string) {
-	r.alloc.ClientStatus = status
-	r.alloc.ClientDescription = desc
+	r.allocLock.Lock()
+	r.allocClientStatus = status
+	r.allocClientDescription = desc
+	r.allocLock.Unlock()
 	select {
 	case r.dirtyCh <- struct{}{}:
 	default:
@@ -336,15 +361,18 @@ func (r *AllocRunner) Run() {
 	}
 
 	// Create the execution context
+	r.ctxLock.Lock()
 	if r.ctx == nil {
 		allocDir := allocdir.NewAllocDir(filepath.Join(r.config.AllocDir, r.alloc.ID))
 		if err := allocDir.Build(tg.Tasks); err != nil {
 			r.logger.Printf("[WARN] client: failed to build task directories: %v", err)
 			r.setStatus(structs.AllocClientStatusFailed, fmt.Sprintf("failed to build task dirs for '%s'", alloc.TaskGroup))
+			r.ctxLock.Unlock()
			return
 		}
 		r.ctx = driver.NewExecContext(allocDir, r.alloc.ID)
 	}
+	r.ctxLock.Unlock()
 
 	// Check if the allocation is in a terminal status. In this case, we don't
 	// start any of the task runners and directly wait for the destroy signal to
@@ -364,8 +392,8 @@ func (r *AllocRunner) Run() {
 			continue
 		}
 
-		tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.alloc,
-			task, r.consulService)
+		tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.Alloc(),
+			task.Copy(), r.consulService)
 		r.tasks[task.Name] = tr
 		go tr.Run()
 	}
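---- review note: client/alloc_runner.go ----
saveAllocRunnerState and Alloc() now follow a copy-on-read discipline: hold the lock just long enough to snapshot the shared fields (copyTaskStates deep-copies the map so the snapshot cannot race the task runners), then persist or inspect the snapshot with no lock held. A minimal, self-contained sketch of that pattern — stateStore and snapshot are hypothetical names for illustration, not the real AllocRunner API:

```go
package main

import (
	"fmt"
	"sync"
)

type taskState struct{ State string }

type stateStore struct {
	mu     sync.RWMutex
	states map[string]*taskState
}

// snapshot copies the map and the pointed-to values under the read lock,
// so callers can use the result without racing concurrent writers.
func (s *stateStore) snapshot() map[string]*taskState {
	s.mu.RLock()
	defer s.mu.RUnlock()
	c := make(map[string]*taskState, len(s.states))
	for k, v := range s.states {
		cv := *v // copy the value, not just the pointer
		c[k] = &cv
	}
	return c
}

func (s *stateStore) set(task, state string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.states[task] = &taskState{State: state}
}

func main() {
	s := &stateStore{states: map[string]*taskState{"web": {State: "pending"}}}
	snap := s.snapshot()
	s.set("web", "running") // does not affect the snapshot
	fmt.Println(snap["web"].State, s.snapshot()["web"].State) // pending running
}
```

Running a few concurrent set/snapshot goroutines under `go run -race` is a quick way to see why handing out the internal map directly (the old `alloc.TaskStates = r.taskStates`) would trip the race detector.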
diff --git a/client/client.go b/client/client.go
index d235a5ff614c..bdcde6227aca 100644
--- a/client/client.go
+++ b/client/client.go
@@ -57,7 +57,7 @@ const (
 
 	// nodeUpdateRetryIntv is how often the client checks for updates to the
 	// node attributes or meta map.
-	nodeUpdateRetryIntv = 30 * time.Second
+	nodeUpdateRetryIntv = 5 * time.Second
 )
 
 // DefaultConfig returns the default configuration
@@ -75,6 +75,10 @@ type Client struct {
 	config *config.Config
 	start  time.Time
 
+	// configCopy is a copy that should be passed to alloc-runners.
+	configCopy *config.Config
+	configLock sync.RWMutex
+
 	logger *log.Logger
 
 	consulService *ConsulService
@@ -90,6 +94,7 @@ type Client struct {
 
 	lastHeartbeat time.Time
 	heartbeatTTL  time.Duration
+	heartbeatLock sync.Mutex
 
 	// allocs is the current set of allocations
 	allocs map[string]*AllocRunner
@@ -143,6 +148,10 @@ func NewClient(cfg *config.Config) (*Client, error) {
 	// Set up the known servers list
 	c.SetServers(c.config.Servers)
 
+	// Store the config copy before restoring state but after it has been
+	// initialized.
+	c.configCopy = c.config.Copy()
+
 	// Restore the state
 	if err := c.restoreState(); err != nil {
 		return nil, fmt.Errorf("failed to restore state: %v", err)
@@ -408,7 +417,9 @@ func (c *Client) restoreState() error {
 	for _, entry := range list {
 		id := entry.Name()
 		alloc := &structs.Allocation{ID: id}
-		ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulService)
+		c.configLock.RLock()
+		ar := NewAllocRunner(c.logger, c.configCopy, c.updateAllocStatus, alloc, c.consulService)
+		c.configLock.RUnlock()
 		c.allocs[id] = ar
 		if err := ar.RestoreState(); err != nil {
 			c.logger.Printf("[ERR] client: failed to restore state for alloc %s: %v", id, err)
@@ -523,7 +534,10 @@ func (c *Client) fingerprint() error {
 		if err != nil {
 			return err
 		}
+
+		c.configLock.Lock()
 		applies, err := f.Fingerprint(c.config, c.config.Node)
+		c.configLock.Unlock()
 		if err != nil {
 			return err
 		}
@@ -551,9 +565,11 @@ func (c *Client) fingerprintPeriodic(name string, f fingerprint.Fingerprint, d t
 	for {
 		select {
 		case <-time.After(d):
+			c.configLock.Lock()
 			if _, err := f.Fingerprint(c.config, c.config.Node); err != nil {
 				c.logger.Printf("[DEBUG] client: periodic fingerprinting for %v failed: %v", name, err)
 			}
+			c.configLock.Unlock()
 		case <-c.shutdownCh:
 			return
 		}
@@ -581,7 +597,9 @@ func (c *Client) setupDrivers() error {
 		if err != nil {
 			return err
 		}
+		c.configLock.Lock()
 		applies, err := d.Fingerprint(c.config, c.config.Node)
+		c.configLock.Unlock()
 		if err != nil {
 			return err
 		}
@@ -647,7 +665,9 @@ func (c *Client) run() {
 			if err := c.updateNodeStatus(); err != nil {
 				heartbeat = time.After(c.retryIntv(registerRetryIntv))
 			} else {
+				c.heartbeatLock.Lock()
 				heartbeat = time.After(c.heartbeatTTL)
+				c.heartbeatLock.Unlock()
 			}
 
 		case <-c.shutdownCh:
@@ -661,6 +681,8 @@
 // determine if the node properties have changed. It returns the new hash values
 // in case they are different from the old hash values.
 func (c *Client) hasNodeChanged(oldAttrHash uint64, oldMetaHash uint64) (bool, uint64, uint64) {
+	c.configLock.RLock()
+	defer c.configLock.RUnlock()
 	newAttrHash, err := hashstructure.Hash(c.config.Node.Attributes, nil)
 	if err != nil {
 		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
@@ -711,6 +733,9 @@ func (c *Client) registerNode() error {
 	if len(resp.EvalIDs) != 0 {
 		c.logger.Printf("[DEBUG] client: %d evaluations triggered by node registration", len(resp.EvalIDs))
 	}
+
+	c.heartbeatLock.Lock()
+	defer c.heartbeatLock.Unlock()
 	c.lastHeartbeat = time.Now()
 	c.heartbeatTTL = resp.HeartbeatTTL
 	return nil
@@ -736,6 +761,9 @@ func (c *Client) updateNodeStatus() error {
 	if resp.Index != 0 {
 		c.logger.Printf("[DEBUG] client: state updated to %s", req.Status)
 	}
+
+	c.heartbeatLock.Lock()
+	defer c.heartbeatLock.Unlock()
 	c.lastHeartbeat = time.Now()
 	c.heartbeatTTL = resp.HeartbeatTTL
 	return nil
@@ -896,6 +924,13 @@ func (c *Client) watchNodeUpdates() {
 			changed, attrHash, metaHash = c.hasNodeChanged(attrHash, metaHash)
 			if changed {
 				c.logger.Printf("[DEBUG] client: state changed, updating node.")
+
+				// Update the config copy.
+				c.configLock.Lock()
+				node := c.config.Node.Copy()
+				c.configCopy.Node = node
+				c.configLock.Unlock()
+
 				c.retryRegisterNode()
 			}
 		case <-c.shutdownCh:
@@ -910,7 +945,7 @@ func (c *Client) runAllocs(update *allocUpdates) {
 	c.allocLock.RLock()
 	exist := make([]*structs.Allocation, 0, len(c.allocs))
 	for _, ar := range c.allocs {
-		exist = append(exist, ar.Alloc())
+		exist = append(exist, ar.alloc)
 	}
 	c.allocLock.RUnlock()
 
@@ -979,7 +1014,9 @@ func (c *Client) updateAlloc(exist, update *structs.Allocation) error {
 func (c *Client) addAlloc(alloc *structs.Allocation) error {
 	c.allocLock.Lock()
 	defer c.allocLock.Unlock()
-	ar := NewAllocRunner(c.logger, c.config, c.updateAllocStatus, alloc, c.consulService)
+	c.configLock.RLock()
+	ar := NewAllocRunner(c.logger, c.configCopy, c.updateAllocStatus, alloc, c.consulService)
+	c.configLock.RUnlock()
 	c.allocs[alloc.ID] = ar
 	go ar.Run()
 	return nil
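---- review note: client/client.go ----
The client-side counterpart of the alloc-runner change: fingerprinters mutate c.config.Node only under configLock, while alloc runners only ever receive c.configCopy, whose Node is swapped for a fresh copy when watchNodeUpdates detects a change. A cut-down sketch of this publish-a-copy pattern; the node and client types below are hypothetical stand-ins, not the real client.Config:

```go
package main

import (
	"fmt"
	"sync"
)

type node struct{ Attributes map[string]string }

func (n *node) copy() *node {
	attrs := make(map[string]string, len(n.Attributes))
	for k, v := range n.Attributes {
		attrs[k] = v
	}
	return &node{Attributes: attrs}
}

type client struct {
	mu            sync.RWMutex
	liveNode      *node // mutated by fingerprinters, always under mu
	publishedNode *node // handed to consumers (the alloc runners)
}

func (c *client) fingerprint(key, val string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.liveNode.Attributes[key] = val
}

// publish refreshes the published copy, mirroring watchNodeUpdates.
func (c *client) publish() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.publishedNode = c.liveNode.copy()
}

func (c *client) view() *node {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.publishedNode
}

func main() {
	c := &client{liveNode: &node{Attributes: map[string]string{}}}
	c.publish()
	v := c.view()
	c.fingerprint("driver.raw_exec", "1") // safe: v is a detached copy
	fmt.Println(len(v.Attributes), len(c.view().Attributes)) // 0 0
	c.publish()
	fmt.Println(len(c.view().Attributes)) // 1
}
```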
diff --git a/client/config/config.go b/client/config/config.go
index dd2c2b967714..b78dc24f2bab 100644
--- a/client/config/config.go
+++ b/client/config/config.go
@@ -72,6 +72,15 @@ type Config struct {
 	Options map[string]string
 }
 
+func (c *Config) Copy() *Config {
+	nc := new(Config)
+	*nc = *c
+	nc.Node = nc.Node.Copy()
+	nc.Servers = structs.CopySliceString(nc.Servers)
+	nc.Options = structs.CopyMapStringString(nc.Options)
+	return nc
+}
+
 // Read returns the specified configuration value or "".
 func (c *Config) Read(id string) string {
 	val, ok := c.Options[id]
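---- review note: client/config/config.go ----
Copy leans on Go struct assignment: `*nc = *c` duplicates every value field in one step, so only the reference-typed fields (Node, Servers, Options) still alias the original and need explicit deep copies. A self-contained illustration with a hypothetical, cut-down config:

```go
package main

import "fmt"

type config struct {
	StateDir string            // value: safely duplicated by struct assignment
	Servers  []string          // reference: must be copied explicitly
	Options  map[string]string // reference: must be copied explicitly
}

func (c *config) copy() *config {
	nc := new(config)
	*nc = *c // copies StateDir; Servers/Options still alias c's backing data
	nc.Servers = append([]string(nil), c.Servers...)
	nc.Options = make(map[string]string, len(c.Options))
	for k, v := range c.Options {
		nc.Options[k] = v
	}
	return nc
}

func main() {
	orig := &config{
		StateDir: "/var/nomad",
		Servers:  []string{"127.0.0.1:4647"},
		Options:  map[string]string{"driver.raw_exec.enable": "1"},
	}
	cp := orig.copy()
	orig.Servers[0] = "10.0.0.1:4647"
	orig.Options["driver.raw_exec.enable"] = "0"
	fmt.Println(cp.Servers[0], cp.Options["driver.raw_exec.enable"])
	// Output: 127.0.0.1:4647 1
}
```

The mutations to orig after the copy leave cp untouched, which is exactly the property the alloc runners rely on when they hold configCopy.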
diff --git a/client/task_runner.go b/client/task_runner.go
index 83a29bb65d39..cbf4618eca4d 100644
--- a/client/task_runner.go
+++ b/client/task_runner.go
@@ -29,9 +29,10 @@ type TaskRunner struct {
 	restartTracker *RestartTracker
 	consulService  *ConsulService
 
-	task     *structs.Task
-	updateCh chan *structs.Allocation
-	handle   driver.DriverHandle
+	task       *structs.Task
+	updateCh   chan *structs.Allocation
+	handle     driver.DriverHandle
+	handleLock sync.Mutex
 
 	destroy   bool
 	destroyCh chan struct{}
@@ -127,7 +128,9 @@ func (r *TaskRunner) RestoreState() error {
 				r.task.Name, r.alloc.ID, err)
 			return nil
 		}
+		r.handleLock.Lock()
 		r.handle = handle
+		r.handleLock.Unlock()
 	}
 	return nil
 }
@@ -139,9 +142,11 @@ func (r *TaskRunner) SaveState() error {
 
 	snap := taskRunnerState{
 		Task: r.task,
 	}
+	r.handleLock.Lock()
 	if r.handle != nil {
 		snap.HandleID = r.handle.ID()
 	}
+	r.handleLock.Unlock()
 	return persistState(r.stateFilePath(), &snap)
 }
@@ -203,7 +208,9 @@ func (r *TaskRunner) startTask() error {
 		r.setState(structs.TaskStateDead, e)
 		return err
 	}
+	r.handleLock.Lock()
 	r.handle = handle
+	r.handleLock.Unlock()
 	r.setState(structs.TaskStateRunning, structs.NewTaskEvent(structs.TaskStarted))
 	return nil
 }
@@ -222,7 +229,10 @@ func (r *TaskRunner) run() {
 	var forceStart bool
 	for {
 		// Start the task if not yet started or it is being forced.
-		if r.handle == nil || forceStart {
+		r.handleLock.Lock()
+		handleEmpty := r.handle == nil
+		r.handleLock.Unlock()
+		if handleEmpty || forceStart {
 			forceStart = false
 			if err := r.startTask(); err != nil {
 				return
@@ -339,11 +349,13 @@ func (r *TaskRunner) handleUpdate(update *structs.Allocation) error {
 
 	// Update will update resources and store the new kill timeout.
 	var mErr multierror.Error
+	r.handleLock.Lock()
 	if r.handle != nil {
 		if err := r.handle.Update(updatedTask); err != nil {
 			mErr.Errors = append(mErr.Errors, fmt.Errorf("updating task resources failed: %v", err))
 		}
 	}
+	r.handleLock.Unlock()
 
 	// Update the restart policy.
 	if r.restartTracker != nil {
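---- review note: client/task_runner.go ----
handleLock now covers every access to r.handle, in two forms: run() copies the pointer into a local under the lock and branches outside it, while handleUpdate holds the lock across handle.Update so the handle cannot be swapped mid-call. A minimal sketch of the read-into-local form; driverHandle here is a stand-in, not the real driver.DriverHandle interface:

```go
package main

import (
	"fmt"
	"sync"
)

type driverHandle struct{ id string }

// update stands in for the real handle.Update call.
func (h *driverHandle) update() error { return nil }

type taskRunner struct {
	handle     *driverHandle
	handleLock sync.Mutex
}

// started mirrors the handleEmpty check in run(): copy the pointer under
// the lock, then make the decision outside it.
func (r *taskRunner) started() bool {
	r.handleLock.Lock()
	h := r.handle
	r.handleLock.Unlock()
	return h != nil
}

func (r *taskRunner) setHandle(h *driverHandle) {
	r.handleLock.Lock()
	r.handle = h
	r.handleLock.Unlock()
}

func main() {
	r := &taskRunner{}
	fmt.Println(r.started()) // false
	r.setHandle(&driverHandle{id: "task-1"})
	fmt.Println(r.started()) // true
}
```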
diff --git a/demo/vagrant/client1.hcl b/demo/vagrant/client1.hcl
index 3e8a2a0c7ab3..61a46ebdf12a 100644
--- a/demo/vagrant/client1.hcl
+++ b/demo/vagrant/client1.hcl
@@ -1,9 +1,11 @@
 # Increase log verbosity
-log_level = "DEBUG"
+log_level = "INFO"
 
 # Setup data dir
 data_dir = "/tmp/client1"
 
+enable_debug = true
+
 # Enable the client
 client {
     enabled = true
@@ -13,6 +15,9 @@ client {
     # like Consul used for service discovery.
     servers = ["127.0.0.1:4647"]
     node_class = "foo"
+    options {
+        "driver.raw_exec.enable" = "1"
+    }
 }
 
 # Modify our port to avoid a collision with server1
diff --git a/demo/vagrant/client2.hcl b/demo/vagrant/client2.hcl
index 1b1372ae2a89..4708c7cbc06e 100644
--- a/demo/vagrant/client2.hcl
+++ b/demo/vagrant/client2.hcl
@@ -1,5 +1,5 @@
 # Increase log verbosity
-log_level = "DEBUG"
+log_level = "INFO"
 
 # Setup data dir
 data_dir = "/tmp/client2"
diff --git a/demo/vagrant/server.hcl b/demo/vagrant/server.hcl
index 653b2c0379e5..280f44a66850 100644
--- a/demo/vagrant/server.hcl
+++ b/demo/vagrant/server.hcl
@@ -1,5 +1,5 @@
 # Increase log verbosity
-log_level = "DEBUG"
+log_level = "INFO"
 
 # Setup data dir
 data_dir = "/tmp/server1"
diff --git a/nomad/structs/funcs.go b/nomad/structs/funcs.go
index edc30319a945..09b391fe3f5e 100644
--- a/nomad/structs/funcs.go
+++ b/nomad/structs/funcs.go
@@ -137,3 +137,69 @@ func GenerateUUID() string {
 		buf[8:10],
 		buf[10:16])
 }
+
+// Helpers for copying generic structures.
+func CopyMapStringString(m map[string]string) map[string]string {
+	l := len(m)
+	if l == 0 {
+		return nil
+	}
+
+	c := make(map[string]string, l)
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
+
+func CopyMapStringInt(m map[string]int) map[string]int {
+	l := len(m)
+	if l == 0 {
+		return nil
+	}
+
+	c := make(map[string]int, l)
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
+
+func CopyMapStringFloat64(m map[string]float64) map[string]float64 {
+	l := len(m)
+	if l == 0 {
+		return nil
+	}
+
+	c := make(map[string]float64, l)
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
+
+func CopySliceString(s []string) []string {
+	l := len(s)
+	if l == 0 {
+		return nil
+	}
+
+	c := make([]string, l)
+	for i, v := range s {
+		c[i] = v
+	}
+	return c
+}
+
+func CopySliceConstraints(s []*Constraint) []*Constraint {
+	l := len(s)
+	if l == 0 {
+		return nil
+	}
+
+	c := make([]*Constraint, l)
+	for i, v := range s {
+		c[i] = v.Copy()
+	}
+	return c
+}
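---- review note: nomad/structs/funcs.go ----
All five helpers return nil for empty input rather than allocating an empty container, so copying a zero-valued struct yields a zero-valued struct. One consequence worth keeping in mind when writing tests: reflect.DeepEqual distinguishes nil from empty non-nil maps and slices, so a Copy of a struct holding an empty-but-non-nil map will not DeepEqual its source. A quick restatement of the convention (a lower-case local re-implementation for illustration, not the exported helper):

```go
package main

import "fmt"

func copySliceString(s []string) []string {
	if len(s) == 0 {
		return nil // empty in, nil out: zero values stay zero-valued
	}
	c := make([]string, len(s))
	copy(c, s)
	return c
}

func main() {
	fmt.Println(copySliceString(nil) == nil)        // true
	fmt.Println(copySliceString([]string{}) == nil) // true
	c := copySliceString([]string{"a", "b"})
	fmt.Println(c, len(c)) // [a b] 2
}
```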
diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go
index e1625094127e..fe81774d27b9 100644
--- a/nomad/structs/structs.go
+++ b/nomad/structs/structs.go
@@ -542,6 +542,20 @@ type Node struct {
 	ModifyIndex uint64
 }
 
+func (n *Node) Copy() *Node {
+	if n == nil {
+		return nil
+	}
+	nn := new(Node)
+	*nn = *n
+	nn.Attributes = CopyMapStringString(nn.Attributes)
+	nn.Resources = nn.Resources.Copy()
+	nn.Reserved = nn.Reserved.Copy()
+	nn.Links = CopyMapStringString(nn.Links)
+	nn.Meta = CopyMapStringString(nn.Meta)
+	return nn
+}
+
 // TerminalStatus returns if the current status is terminal and
 // will no longer transition.
 func (n *Node) TerminalStatus() bool {
@@ -648,6 +662,9 @@ func (r *Resources) MeetsMinResources() error {
 
 // Copy returns a deep copy of the resources
 func (r *Resources) Copy() *Resources {
+	if r == nil {
+		return nil
+	}
 	newR := new(Resources)
 	*newR = *r
 	n := len(r.Networks)
@@ -742,6 +759,9 @@ func (n *NetworkResource) MeetsMinResources() error {
 
 // Copy returns a deep copy of the network resource
 func (n *NetworkResource) Copy() *NetworkResource {
+	if n == nil {
+		return nil
+	}
 	newR := new(NetworkResource)
 	*newR = *n
 	if n.ReservedPorts != nil {
@@ -901,12 +921,23 @@ func (j *Job) InitFields() {
 // Copy returns a deep copy of the Job. It is expected that callers use recover.
 // This job can panic if the deep copy failed as it uses reflection.
 func (j *Job) Copy() *Job {
-	i, err := copystructure.Copy(j)
-	if err != nil {
-		panic(err)
+	if j == nil {
+		return nil
+	}
+	nj := new(Job)
+	*nj = *j
+	nj.Datacenters = CopySliceString(nj.Datacenters)
+	nj.Constraints = CopySliceConstraints(nj.Constraints)
+
+	tgs := make([]*TaskGroup, len(nj.TaskGroups))
+	for i, tg := range nj.TaskGroups {
+		tgs[i] = tg.Copy()
 	}
+	nj.TaskGroups = tgs
 
-	return i.(*Job)
+	nj.Periodic = nj.Periodic.Copy()
+	nj.Meta = CopyMapStringString(nj.Meta)
+	return nj
 }
 
 // Validate is used to sanity check a job input
@@ -1066,6 +1097,15 @@ type PeriodicConfig struct {
 	ProhibitOverlap bool `mapstructure:"prohibit_overlap"`
 }
 
+func (p *PeriodicConfig) Copy() *PeriodicConfig {
+	if p == nil {
+		return nil
+	}
+	np := new(PeriodicConfig)
+	*np = *p
+	return np
+}
+
 func (p *PeriodicConfig) Validate() error {
 	if !p.Enabled {
 		return nil
@@ -1186,6 +1226,15 @@ type RestartPolicy struct {
 	Mode string
 }
 
+func (r *RestartPolicy) Copy() *RestartPolicy {
+	if r == nil {
+		return nil
+	}
+	nrp := new(RestartPolicy)
+	*nrp = *r
+	return nrp
+}
+
 func (r *RestartPolicy) Validate() error {
 	switch r.Mode {
 	case RestartPolicyModeDelay, RestartPolicyModeFail:
@@ -1246,12 +1295,23 @@ type TaskGroup struct {
 }
 
 func (tg *TaskGroup) Copy() *TaskGroup {
-	i, err := copystructure.Copy(tg)
-	if err != nil {
-		panic(err)
+	if tg == nil {
+		return nil
 	}
+	ntg := new(TaskGroup)
+	*ntg = *tg
+	ntg.Constraints = CopySliceConstraints(ntg.Constraints)
+
+	ntg.RestartPolicy = ntg.RestartPolicy.Copy()
 
-	return i.(*TaskGroup)
+	tasks := make([]*Task, len(ntg.Tasks))
+	for i, t := range ntg.Tasks {
+		tasks[i] = t.Copy()
+	}
+	ntg.Tasks = tasks
+
+	ntg.Meta = CopyMapStringString(ntg.Meta)
+	return ntg
 }
 
 // InitFields is used to initialize fields in the TaskGroup.
@@ -1348,6 +1408,15 @@ type ServiceCheck struct {
 	Timeout  time.Duration // Timeout of the response from the check before consul fails the check
 }
 
+func (sc *ServiceCheck) Copy() *ServiceCheck {
+	if sc == nil {
+		return nil
+	}
+	nsc := new(ServiceCheck)
+	*nsc = *sc
+	return nsc
+}
+
 func (sc *ServiceCheck) Validate() error {
 	t := strings.ToLower(sc.Type)
 	if t != ServiceCheckTCP && t != ServiceCheckHTTP {
@@ -1393,6 +1462,25 @@ type Service struct {
 	Checks []*ServiceCheck // List of checks associated with the service
 }
 
+func (s *Service) Copy() *Service {
+	if s == nil {
+		return nil
+	}
+	ns := new(Service)
+	*ns = *s
+	ns.Tags = CopySliceString(ns.Tags)
+
+	var checks []*ServiceCheck
+	if l := len(ns.Checks); l != 0 {
+		checks = make([]*ServiceCheck, len(ns.Checks))
+		for i, c := range ns.Checks {
+			checks[i] = c.Copy()
+		}
+	}
+	ns.Checks = checks
+	return ns
+}
+
 // InitFields interpolates values of Job, Task Group and Task in the Service
 // Name. This also generates check names, service id and check ids.
 func (s *Service) InitFields(job string, taskGroup string, task string) {
@@ -1478,6 +1566,31 @@ type Task struct {
 	KillTimeout time.Duration `mapstructure:"kill_timeout"`
 }
 
+func (t *Task) Copy() *Task {
+	if t == nil {
+		return nil
+	}
+	nt := new(Task)
+	*nt = *t
+	nt.Env = CopyMapStringString(nt.Env)
+
+	services := make([]*Service, len(nt.Services))
+	for i, s := range nt.Services {
+		services[i] = s.Copy()
+	}
+	nt.Services = services
+	nt.Constraints = CopySliceConstraints(nt.Constraints)
+
+	nt.Resources = nt.Resources.Copy()
+	nt.Meta = CopyMapStringString(nt.Meta)
+
+	if i, err := copystructure.Copy(nt.Config); err == nil {
+		nt.Config = i.(map[string]interface{})
+	}
+
+	return nt
+}
+
 // InitFields initializes fields in the task.
 func (t *Task) InitFields(job *Job, tg *TaskGroup) {
 	t.InitServiceFields(job.Name, tg.Name)
@@ -1528,6 +1641,9 @@ type TaskState struct {
 }
 
 func (ts *TaskState) Copy() *TaskState {
+	if ts == nil {
+		return nil
+	}
 	copy := new(TaskState)
 	copy.State = ts.State
 	copy.Events = make([]*TaskEvent, len(ts.Events))
@@ -1572,6 +1688,9 @@ type TaskEvent struct {
 }
 
 func (te *TaskEvent) Copy() *TaskEvent {
+	if te == nil {
+		return nil
+	}
 	copy := new(TaskEvent)
 	*copy = *te
 	return copy
@@ -1664,6 +1783,15 @@ type Constraint struct {
 	str string // Memoized string
 }
 
+func (c *Constraint) Copy() *Constraint {
+	if c == nil {
+		return nil
+	}
+	nc := new(Constraint)
+	*nc = *c
+	return nc
+}
+
 func (c *Constraint) String() string {
 	if c.str != "" {
 		return c.str
@@ -1772,12 +1900,35 @@ type Allocation struct {
 }
 
 func (a *Allocation) Copy() *Allocation {
-	i, err := copystructure.Copy(a)
-	if err != nil {
-		panic(err)
+	if a == nil {
+		return nil
+	}
+	na := new(Allocation)
+	*na = *a
+
+	na.Job = na.Job.Copy()
+	na.Resources = na.Resources.Copy()
+
+	tr := make(map[string]*Resources, len(na.TaskResources))
+	for task, resource := range na.TaskResources {
+		tr[task] = resource.Copy()
+	}
+	na.TaskResources = tr
+
+	s := make(map[string]string, len(na.Services))
+	for service, id := range na.Services {
+		s[service] = id
 	}
+	na.Services = s
 
-	return i.(*Allocation)
+	na.Metrics = na.Metrics.Copy()
+
+	ts := make(map[string]*TaskState, len(na.TaskStates))
+	for task, state := range na.TaskStates {
+		ts[task] = state.Copy()
+	}
+	na.TaskStates = ts
+	return na
 }
 
 // TerminalStatus returns if the desired or actual status is terminal and
@@ -1910,6 +2061,21 @@ type AllocMetric struct {
 	CoalescedFailures int
 }
 
+func (a *AllocMetric) Copy() *AllocMetric {
+	if a == nil {
+		return nil
+	}
+	na := new(AllocMetric)
+	*na = *a
+	na.NodesAvailable = CopyMapStringInt(na.NodesAvailable)
+	na.ClassFiltered = CopyMapStringInt(na.ClassFiltered)
+	na.ConstraintFiltered = CopyMapStringInt(na.ConstraintFiltered)
+	na.ClassExhausted = CopyMapStringInt(na.ClassExhausted)
+	na.DimensionExhausted = CopyMapStringInt(na.DimensionExhausted)
+	na.Scores = CopyMapStringFloat64(na.Scores)
+	return na
+}
+
 func (a *AllocMetric) EvaluateNode() {
 	a.NodesEvaluated += 1
 }
@@ -2074,6 +2240,9 @@ func (e *Evaluation) GoString() string {
 }
 
 func (e *Evaluation) Copy() *Evaluation {
+	if e == nil {
+		return nil
+	}
 	ne := new(Evaluation)
 	*ne = *e
 	return ne
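---- review note: nomad/structs/structs.go ----
Swapping the reflection-based copystructure calls for hand-written Copy methods removes the panic paths and the reflection cost; the one reflective copy kept is Task.Config, whose values are interface{} (note the err == nil guard before the type assertion — asserting on a nil result would panic). The stale "callers use recover" comment retained above Job.Copy no longer applies and could be dropped in a follow-up. Every method shares two conventions: a nil receiver returns a nil copy, so chained calls like nn.Resources.Copy() need no call-site nil checks, and `*nn = *n` carries all value fields. A sketch with hypothetical inner/outer types:

```go
package main

import "fmt"

type inner struct{ N int }

func (i *inner) Copy() *inner {
	if i == nil {
		return nil // nil-safe: callers can chain without checking
	}
	ni := new(inner)
	*ni = *i
	return ni
}

type outer struct {
	Name  string
	Child *inner
}

func (o *outer) Copy() *outer {
	if o == nil {
		return nil
	}
	no := new(outer)
	*no = *o
	no.Child = no.Child.Copy() // safe even when Child is nil
	return no
}

func main() {
	var missing *outer
	fmt.Println(missing.Copy() == nil) // true: method on nil pointer is fine

	o := &outer{Name: "alloc", Child: &inner{N: 1}}
	c := o.Copy()
	o.Child.N = 2
	fmt.Println(c.Child.N) // 1: the copy is fully detached
}
```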
diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go
index dfbca2ad1cc0..a5cce7730c8a 100644
--- a/nomad/structs/structs_test.go
+++ b/nomad/structs/structs_test.go
@@ -163,7 +163,7 @@ func TestJob_Copy(t *testing.T) {
 
 	c := j.Copy()
 	if !reflect.DeepEqual(j, c) {
-		t.Fatalf("Copy() returned an unequal Job; got %v; want %v", c, j)
+		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
 	}
 }
 
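---- review note: nomad/structs/structs_test.go ----
The switch to %#v makes failures legible, but reflect.DeepEqual alone cannot distinguish a deep copy from an aliased one — a shallow copy of a map field is still DeepEqual to its source. A hypothetical follow-up test that would close that gap; it assumes the testJob() helper this file already uses for TestJob_Copy, and a job whose Meta map is non-nil:

```go
package structs

import "testing"

// TestJob_CopyDetached mutates the original after copying and asserts the
// copy does not move, catching aliasing that DeepEqual would miss.
func TestJob_CopyDetached(t *testing.T) {
	j := testJob()
	c := j.Copy()

	j.Meta["extra"] = "changed-after-copy"
	if _, ok := c.Meta["extra"]; ok {
		t.Fatalf("Copy() returned a Job that aliases the original's Meta map")
	}
}
```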