diff --git a/api/jobs.go b/api/jobs.go index fcd103b52625..25b2d914f04b 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -168,6 +168,21 @@ func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, return &resp, qm, nil } +func (j *Jobs) Dispatch(jobID string, meta map[string]string, + payload []byte, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) { + var resp JobDispatchResponse + req := &JobDispatchRequest{ + JobID: jobID, + Meta: meta, + Payload: payload, + } + wm, err := j.client.write("/v1/job/"+jobID+"/dispatch", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + // periodicForceResponse is used to deserialize a force response type periodicForceResponse struct { EvalID string @@ -187,6 +202,13 @@ type PeriodicConfig struct { ProhibitOverlap bool } +// ConstructorConfig is used to configure the constructor job +type ConstructorConfig struct { + Payload string + MetaRequired []string + MetaOptional []string +} + // Job is used to serialize a job. type Job struct { Region string @@ -201,6 +223,8 @@ type Job struct { TaskGroups []*TaskGroup Update *UpdateStrategy Periodic *PeriodicConfig + Constructor *ConstructorConfig + Payload []byte Meta map[string]string VaultToken string Status string @@ -212,14 +236,30 @@ type Job struct { // JobSummary summarizes the state of the allocations of a job type JobSummary struct { - JobID string - Summary map[string]TaskGroupSummary + JobID string + Summary map[string]TaskGroupSummary + Children *JobChildrenSummary // Raft Indexes CreateIndex uint64 ModifyIndex uint64 } +// JobChildrenSummary contains the summary of children job status +type JobChildrenSummary struct { + Pending int64 + Running int64 + Dead int64 +} + +func (jc *JobChildrenSummary) Sum() int { + if jc == nil { + return 0 + } + + return int(jc.Pending + jc.Running + jc.Dead) +} + // TaskGroup summarizes the state of all the allocations of a particular // TaskGroup type TaskGroupSummary struct { @@ -402,3 +442,17 @@ type DesiredUpdates struct { InPlaceUpdate uint64 DestructiveUpdate uint64 } + +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string +} + +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + QueryMeta +} diff --git a/api/tasks.go b/api/tasks.go index 2ca804e6267b..6fb76100082d 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -141,22 +141,28 @@ type LogConfig struct { MaxFileSizeMB int } +// DispatchInputConfig configures how a task gets its input from a job dispatch +type DispatchInputConfig struct { + File string +} + // Task is a single process in a task group. type Task struct { - Name string - Driver string - User string - Config map[string]interface{} - Constraints []*Constraint - Env map[string]string - Services []Service - Resources *Resources - Meta map[string]string - KillTimeout time.Duration - LogConfig *LogConfig - Artifacts []*TaskArtifact - Vault *Vault - Templates []*Template + Name string + Driver string + User string + Config map[string]interface{} + Constraints []*Constraint + Env map[string]string + Services []Service + Resources *Resources + Meta map[string]string + KillTimeout time.Duration + LogConfig *LogConfig + Artifacts []*TaskArtifact + Vault *Vault + Templates []*Template + DispatchInput *DispatchInputConfig } // TaskArtifact is used to download artifacts before running a task. 
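For illustration alongside the API changes above, here is a minimal sketch (not part of the diff) of how a caller might drive the new Jobs.Dispatch method; the job ID "my-constructor" and the meta key are hypothetical, while api.DefaultConfig and api.NewClient are the package's existing client constructors:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig reads NOMAD_ADDR from the environment when set.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Dispatch an instance of a constructor job, passing raw payload
	// bytes; the server snappy-compresses them before storing the
	// derived job.
	meta := map[string]string{"input": "example"}
	payload := []byte("hello world")

	resp, _, err := client.Jobs().Dispatch("my-constructor", meta, payload, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("dispatched %s via eval %s\n", resp.DispatchedJobID, resp.EvalID)
}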
diff --git a/client/allocdir/alloc_dir.go b/client/allocdir/alloc_dir.go index 1f0377714484..82d7c98a793b 100644 --- a/client/allocdir/alloc_dir.go +++ b/client/allocdir/alloc_dir.go @@ -411,7 +411,7 @@ func (d *AllocDir) LogDir() string { // List returns the list of files at a path relative to the alloc dir func (d *AllocDir) List(path string) ([]*AllocFileInfo, error) { - if escapes, err := structs.PathEscapesAllocDir(path); err != nil { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") @@ -437,7 +437,7 @@ func (d *AllocDir) List(path string) ([]*AllocFileInfo, error) { // Stat returns information about the file at a path relative to the alloc dir func (d *AllocDir) Stat(path string) (*AllocFileInfo, error) { - if escapes, err := structs.PathEscapesAllocDir(path); err != nil { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") @@ -460,7 +460,7 @@ func (d *AllocDir) Stat(path string) (*AllocFileInfo, error) { // ReadAt returns a reader for a file at the path relative to the alloc dir func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { - if escapes, err := structs.PathEscapesAllocDir(path); err != nil { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") @@ -489,7 +489,7 @@ func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { // BlockUntilExists blocks until the passed file relative the allocation // directory exists. The block can be cancelled with the passed tomb. func (d *AllocDir) BlockUntilExists(path string, t *tomb.Tomb) (chan error, error) { - if escapes, err := structs.PathEscapesAllocDir(path); err != nil { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") @@ -510,7 +510,7 @@ func (d *AllocDir) BlockUntilExists(path string, t *tomb.Tomb) (chan error, erro // allocation directory. The offset should be the last read offset. The tomb is // used to clean up the watch. func (d *AllocDir) ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error) { - if escapes, err := structs.PathEscapesAllocDir(path); err != nil { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") diff --git a/client/driver/driver.go b/client/driver/driver.go index 6e7a9d0103b9..aa8872d2eeea 100644 --- a/client/driver/driver.go +++ b/client/driver/driver.go @@ -150,11 +150,8 @@ func NewExecContext(alloc *allocdir.AllocDir, allocID string) *ExecContext { func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node, task *structs.Task, alloc *structs.Allocation, vaultToken string) (*env.TaskEnvironment, error) { - tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup) env := env.NewTaskEnvironment(node). - SetTaskMeta(task.Meta). 
- SetTaskGroupMeta(tg.Meta). - SetJobMeta(alloc.Job.Meta). + SetTaskMeta(alloc.Job.CombinedTaskMeta(alloc.TaskGroup, task.Name)). SetJobName(alloc.Job.Name). SetEnvvars(task.Env). SetTaskName(task.Name) diff --git a/client/driver/driver_test.go b/client/driver/driver_test.go index 6ca271301999..740e78e67630 100644 --- a/client/driver/driver_test.go +++ b/client/driver/driver_test.go @@ -118,6 +118,7 @@ func TestDriver_GetTaskEnv(t *testing.T) { } alloc := mock.Alloc() + alloc.Job.TaskGroups[0].Tasks[0] = task alloc.Name = "Bar" env, err := GetTaskEnv(nil, nil, task, alloc, "") if err != nil { diff --git a/client/driver/env/env.go b/client/driver/env/env.go index 2f9fc5c8cf82..5bed18bb1c77 100644 --- a/client/driver/env/env.go +++ b/client/driver/env/env.go @@ -85,8 +85,6 @@ const ( type TaskEnvironment struct { Env map[string]string TaskMeta map[string]string - TaskGroupMeta map[string]string - JobMeta map[string]string AllocDir string TaskDir string SecretsDir string @@ -138,11 +136,9 @@ func (t *TaskEnvironment) Build() *TaskEnvironment { t.NodeValues = make(map[string]string) t.TaskEnv = make(map[string]string) - // Build the meta with the following precedence: task, task group, job. - for _, meta := range []map[string]string{t.JobMeta, t.TaskGroupMeta, t.TaskMeta} { - for k, v := range meta { - t.TaskEnv[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v - } + // Build the meta + for k, v := range t.TaskMeta { + t.TaskEnv[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v } // Build the ports @@ -336,26 +332,6 @@ func (t *TaskEnvironment) ClearTaskMeta() *TaskEnvironment { return t } -func (t *TaskEnvironment) SetTaskGroupMeta(m map[string]string) *TaskEnvironment { - t.TaskGroupMeta = m - return t -} - -func (t *TaskEnvironment) ClearTaskGroupMeta() *TaskEnvironment { - t.TaskGroupMeta = nil - return t -} - -func (t *TaskEnvironment) SetJobMeta(m map[string]string) *TaskEnvironment { - t.JobMeta = m - return t -} - -func (t *TaskEnvironment) ClearJobMeta() *TaskEnvironment { - t.JobMeta = nil - return t -} - func (t *TaskEnvironment) SetEnvvars(m map[string]string) *TaskEnvironment { t.Env = m return t diff --git a/client/driver/env/env_test.go b/client/driver/env/env_test.go index 88c5986e095f..51307eb5ff16 100644 --- a/client/driver/env/env_test.go +++ b/client/driver/env/env_test.go @@ -140,7 +140,6 @@ func TestEnvironment_AsList(t *testing.T) { env := NewTaskEnvironment(n). SetNetworks(networks). SetPortMap(portMap). - SetTaskGroupMeta(map[string]string{"foo": "bar", "baz": "bam"}). SetTaskMeta(map[string]string{"foo": "baz"}).Build() act := env.EnvList() @@ -154,7 +153,6 @@ func TestEnvironment_AsList(t *testing.T) { "NOMAD_HOST_PORT_http=80", "NOMAD_HOST_PORT_https=8080", "NOMAD_META_FOO=baz", - "NOMAD_META_BAZ=bam", } sort.Strings(act) sort.Strings(exp) @@ -259,23 +257,3 @@ func TestEnvironment_AppendHostEnvVars(t *testing.T) { t.Fatalf("Didn't filter environment variable %q", skip) } } - -func TestEnvironment_MetaPrecedence(t *testing.T) { - n := mock.Node() - env := NewTaskEnvironment(n). - SetJobMeta(map[string]string{"foo": "job", "bar": "job", "baz": "job"}). - SetTaskGroupMeta(map[string]string{"foo": "tg", "bar": "tg"}). 
- SetTaskMeta(map[string]string{"foo": "task"}).Build() - - act := env.EnvList() - exp := []string{ - "NOMAD_META_FOO=task", - "NOMAD_META_BAR=tg", - "NOMAD_META_BAZ=job", - } - sort.Strings(act) - sort.Strings(exp) - if !reflect.DeepEqual(act, exp) { - t.Fatalf("env.List() returned %v; want %v", act, exp) - } -} diff --git a/client/task_runner.go b/client/task_runner.go index 4c068c570724..78c609d61e56 100644 --- a/client/task_runner.go +++ b/client/task_runner.go @@ -13,8 +13,10 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/golang/snappy" "github.com/hashicorp/consul-template/signals" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/driver" "github.com/hashicorp/nomad/client/getter" @@ -85,6 +87,9 @@ type TaskRunner struct { // downloaded artifactsDownloaded bool + // payloadRendered tracks whether the payload has been rendered to disk + payloadRendered bool + // vaultFuture is the means to wait for and get a Vault token vaultFuture *tokenFuture @@ -129,6 +134,7 @@ type taskRunnerState struct { Task *structs.Task HandleID string ArtifactDownloaded bool + PayloadRendered bool } // TaskStateUpdater is used to signal that tasks state has changed. @@ -231,6 +237,7 @@ func (r *TaskRunner) RestoreState() error { r.task = snap.Task } r.artifactsDownloaded = snap.ArtifactDownloaded + r.payloadRendered = snap.PayloadRendered if err := r.setTaskEnv(); err != nil { return fmt.Errorf("client: failed to create task environment for task %q in allocation %q: %v", @@ -293,6 +300,7 @@ func (r *TaskRunner) SaveState() error { Task: r.task, Version: r.config.Version, ArtifactDownloaded: r.artifactsDownloaded, + PayloadRendered: r.payloadRendered, } r.handleLock.Lock() if r.handle != nil { @@ -704,6 +712,31 @@ func (r *TaskRunner) prestart(resultCh chan bool) { return } + // If the job is a dispatch job and there is a payload, write it to disk + requirePayload := len(r.alloc.Job.Payload) != 0 && + (r.task.DispatchInput != nil && r.task.DispatchInput.File != "") + if !r.payloadRendered && requirePayload { + renderTo := filepath.Join(r.taskDir, allocdir.TaskLocal, r.task.DispatchInput.File) + decoded, err := snappy.Decode(nil, r.alloc.Job.Payload) + if err != nil { + r.setState( + structs.TaskStateDead, + structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask()) + resultCh <- false + return + } + + if err := ioutil.WriteFile(renderTo, decoded, 0777); err != nil { + r.setState( + structs.TaskStateDead, + structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask()) + resultCh <- false + return + } + + r.payloadRendered = true + } + for { // Download the task's artifacts if !r.artifactsDownloaded && len(r.task.Artifacts) > 0 { diff --git a/client/task_runner_test.go b/client/task_runner_test.go index f34c9460e7c9..abed11996a48 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -8,10 +8,12 @@ import ( "net/http/httptest" "os" "path/filepath" + "reflect" "syscall" "testing" "time" + "github.com/golang/snappy" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/driver" @@ -1244,3 +1246,66 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) { t.Fatalf("err: %v", err) }) } + +// Test that the payload is written to disk +func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) { + alloc := mock.Alloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + 
task.Driver = "mock_driver" + task.Config = map[string]interface{}{ + "exit_code": "0", + "run_for": "1s", + } + fileName := "test" + task.DispatchInput = &structs.DispatchInputConfig{ + File: fileName, + } + alloc.Job.Constructor = &structs.ConstructorConfig{} + + // Add an encrypted payload + expected := []byte("hello world") + compressed := snappy.Encode(nil, expected) + alloc.Job.Payload = compressed + + upd, tr := testTaskRunnerFromAlloc(false, alloc) + tr.MarkReceived() + defer tr.Destroy(structs.NewTaskEvent(structs.TaskKilled)) + defer tr.ctx.AllocDir.Destroy() + go tr.Run() + + select { + case <-tr.WaitCh(): + case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second): + t.Fatalf("timeout") + } + + if len(upd.events) != 3 { + t.Fatalf("should have 3 updates: %#v", upd.events) + } + + if upd.state != structs.TaskStateDead { + t.Fatalf("TaskState %v; want %v", upd.state, structs.TaskStateDead) + } + + if upd.events[0].Type != structs.TaskReceived { + t.Fatalf("First Event was %v; want %v", upd.events[0].Type, structs.TaskReceived) + } + + if upd.events[1].Type != structs.TaskStarted { + t.Fatalf("Second Event was %v; want %v", upd.events[1].Type, structs.TaskStarted) + } + + if upd.events[2].Type != structs.TaskTerminated { + t.Fatalf("Third Event was %v; want %v", upd.events[2].Type, structs.TaskTerminated) + } + + // Check that the file was written to disk properly + payloadPath := filepath.Join(tr.taskDir, allocdir.TaskLocal, fileName) + data, err := ioutil.ReadFile(payloadPath) + if err != nil { + t.Fatalf("Failed to read file: %v", err) + } + if !reflect.DeepEqual(data, expected) { + t.Fatalf("Bad; got %v; want %v", string(data), string(expected)) + } +} diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index 735dfca97e92..26d61e17a89e 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -14,7 +14,7 @@ import ( ) func getPort() int { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") if err != nil { panic(err) } diff --git a/command/agent/alloc_endpoint.go b/command/agent/alloc_endpoint.go index 19e2f67ed546..963cc47f0f02 100644 --- a/command/agent/alloc_endpoint.go +++ b/command/agent/alloc_endpoint.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + "github.com/golang/snappy" "github.com/hashicorp/nomad/nomad/structs" ) @@ -57,7 +58,19 @@ func (s *HTTPServer) AllocSpecificRequest(resp http.ResponseWriter, req *http.Re if out.Alloc == nil { return nil, CodedError(404, "alloc not found") } - return out.Alloc, nil + + // Decode the payload if there is any + alloc := out.Alloc + if alloc.Job != nil && len(alloc.Job.Payload) != 0 { + decoded, err := snappy.Decode(nil, alloc.Job.Payload) + if err != nil { + return nil, err + } + alloc = alloc.Copy() + alloc.Job.Payload = decoded + } + + return alloc, nil } func (s *HTTPServer) ClientAllocRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index f996799b7b76..111548bce8ec 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -3,9 +3,11 @@ package agent import ( "net/http" "net/http/httptest" + "reflect" "strings" "testing" + "github.com/golang/snappy" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -161,6 +163,62 @@ func TestHTTP_AllocQuery(t *testing.T) { }) } +func TestHTTP_AllocQuery_Payload(t *testing.T) { + 
httpTest(t, nil, func(s *TestServer) { + // Directly manipulate the state + state := s.Agent.server.State() + alloc := mock.Alloc() + if err := state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)); err != nil { + t.Fatal(err) + } + + // Insert Payload compressed + expected := []byte("hello world") + compressed := snappy.Encode(nil, expected) + alloc.Job.Payload = compressed + + err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make the HTTP request + req, err := http.NewRequest("GET", "/v1/allocation/"+alloc.ID, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + respW := httptest.NewRecorder() + + // Make the request + obj, err := s.Server.AllocSpecificRequest(respW, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check for the index + if respW.HeaderMap.Get("X-Nomad-Index") == "" { + t.Fatalf("missing index") + } + if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { + t.Fatalf("missing known leader") + } + if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { + t.Fatalf("missing last contact") + } + + // Check the job + a := obj.(*structs.Allocation) + if a.ID != alloc.ID { + t.Fatalf("bad: %#v", a) + } + + // Check the payload is decompressed + if !reflect.DeepEqual(a.Job.Payload, expected) { + t.Fatalf("Payload not decompressed properly; got %#v; want %#v", a.Job.Payload, expected) + } + }) +} + func TestHTTP_AllocStats(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Make the HTTP request diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index 6fd0814a5774..8cf33ff2f29c 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -4,6 +4,7 @@ import ( "net/http" "strings" + "github.com/golang/snappy" "github.com/hashicorp/nomad/nomad/structs" ) @@ -57,6 +58,9 @@ func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Requ case strings.HasSuffix(path, "/summary"): jobName := strings.TrimSuffix(path, "/summary") return s.jobSummaryRequest(resp, req, jobName) + case strings.HasSuffix(path, "/dispatch"): + jobName := strings.TrimSuffix(path, "/dispatch") + return s.jobDispatchRequest(resp, req, jobName) default: return s.jobCRUD(resp, req, path) } @@ -205,7 +209,19 @@ func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request, if out.Job == nil { return nil, CodedError(404, "job not found") } - return out.Job, nil + + // Decode the payload if there is any + job := out.Job + if len(job.Payload) != 0 { + decoded, err := snappy.Decode(nil, out.Job.Payload) + if err != nil { + return nil, err + } + job = job.Copy() + job.Payload = decoded + } + + return job, nil } func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request, @@ -265,3 +281,28 @@ func (s *HTTPServer) jobSummaryRequest(resp http.ResponseWriter, req *http.Reque setIndex(resp, out.Index) return out.JobSummary, nil } + +func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) { + if req.Method != "PUT" && req.Method != "POST" { + return nil, CodedError(405, ErrInvalidMethod) + } + args := structs.JobDispatchRequest{} + if err := decodeBody(req, &args); err != nil { + return nil, CodedError(400, err.Error()) + } + if args.JobID != "" && args.JobID != name { + return nil, CodedError(400, "Job ID does not match") + } + if args.JobID == "" { + args.JobID = name + } + + s.parseRegion(req, &args.Region) + + var out structs.JobDispatchResponse + if err := 
s.agent.RPC("Job.Dispatch", &args, &out); err != nil { + return nil, err + } + setIndex(resp, out.Index) + return out, nil +} diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index 1f718cbc3d88..219b879df26a 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -3,8 +3,10 @@ package agent import ( "net/http" "net/http/httptest" + "reflect" "testing" + "github.com/golang/snappy" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -205,6 +207,59 @@ func TestHTTP_JobQuery(t *testing.T) { }) } +func TestHTTP_JobQuery_Payload(t *testing.T) { + httpTest(t, nil, func(s *TestServer) { + // Create the job + job := mock.Job() + + // Insert Payload compressed + expected := []byte("hello world") + compressed := snappy.Encode(nil, expected) + job.Payload = compressed + + // Directly manipulate the state + state := s.Agent.server.State() + if err := state.UpsertJob(1000, job); err != nil { + t.Fatalf("Failed to upsert job: %v", err) + } + + // Make the HTTP request + req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + respW := httptest.NewRecorder() + + // Make the request + obj, err := s.Server.JobSpecificRequest(respW, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check for the index + if respW.HeaderMap.Get("X-Nomad-Index") == "" { + t.Fatalf("missing index") + } + if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { + t.Fatalf("missing known leader") + } + if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { + t.Fatalf("missing last contact") + } + + // Check the job + j := obj.(*structs.Job) + if j.ID != job.ID { + t.Fatalf("bad: %#v", j) + } + + // Check the payload is decompressed + if !reflect.DeepEqual(j.Payload, expected) { + t.Fatalf("Payload not decompressed properly; got %#v; want %#v", j.Payload, expected) + } + }) +} + func TestHTTP_JobUpdate(t *testing.T) { httpTest(t, nil, func(s *TestServer) { // Create the job @@ -521,3 +576,51 @@ func TestHTTP_JobPlan(t *testing.T) { } }) } + +func TestHTTP_JobDispatch(t *testing.T) { + httpTest(t, nil, func(s *TestServer) { + // Create the constructor job + job := mock.Job() + job.Type = structs.JobTypeBatch + job.Constructor = &structs.ConstructorConfig{} + + args := structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + var resp structs.JobRegisterResponse + if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + // Make the request + respW := httptest.NewRecorder() + args2 := structs.JobDispatchRequest{ + WriteRequest: structs.WriteRequest{Region: "global"}, + } + buf := encodeReq(args2) + + // Make the HTTP request + req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf) + if err != nil { + t.Fatalf("err: %v", err) + } + respW.Flush() + + // Make the request + obj, err := s.Server.JobSpecificRequest(respW, req2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check the response + dispatch := obj.(structs.JobDispatchResponse) + if dispatch.EvalID == "" { + t.Fatalf("bad: %v", dispatch) + } + + if dispatch.DispatchedJobID == "" { + t.Fatalf("bad: %v", dispatch) + } + }) +} diff --git a/command/job.go b/command/job.go new file mode 100644 index 000000000000..35477146fa6f --- /dev/null +++ b/command/job.go @@ -0,0 +1,19 @@ +package command + +import "github.com/mitchellh/cli" + +type JobCommand struct { + Meta +} + +func (f *JobCommand) Help() 
string { + return "This command is accessed by using one of the subcommands below." +} + +func (f *JobCommand) Synopsis() string { + return "Interact with jobs" +} + +func (f *JobCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/job_dispatch.go b/command/job_dispatch.go new file mode 100644 index 000000000000..371191f143a6 --- /dev/null +++ b/command/job_dispatch.go @@ -0,0 +1,134 @@ +package command + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + flaghelper "github.com/hashicorp/nomad/helper/flag-helpers" +) + +type JobDispatchCommand struct { + Meta +} + +func (c *JobDispatchCommand) Help() string { + helpText := ` +Usage: nomad job dispatch [options] <constructor> [input source] + +Dispatch creates an instance of a constructor job. A data payload for the +dispatched instance can be provided via stdin by using "-" or by specifying a +path to a file. Metadata can be supplied by using the meta flag one or more +times. + +Upon successful creation, the dispatched job ID will be printed and the +triggered evaluation will be monitored. This can be disabled by supplying the +detach flag. + +General Options: + + ` + generalOptionsUsage() + ` + +Dispatch Options: + + -detach + Return immediately instead of entering monitor mode. After job dispatch, + the evaluation ID will be printed to the screen, which can be used to + examine the evaluation using the eval-status command. + + -verbose + Display full information. +` + return strings.TrimSpace(helpText) +} + +func (c *JobDispatchCommand) Synopsis() string { + return "Dispatch an instance of a constructor job" +} + +func (c *JobDispatchCommand) Run(args []string) int { + var detach, verbose bool + var meta []string + + flags := c.Meta.FlagSet("job dispatch", FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.BoolVar(&detach, "detach", false, "") + flags.BoolVar(&verbose, "verbose", false, "") + flags.Var((*flaghelper.StringFlag)(&meta), "meta", "") + + if err := flags.Parse(args); err != nil { + return 1 + } + + // Truncate the id unless full length is requested + length := shortId + if verbose { + length = fullId + } + + // Check that we got one or two arguments + args = flags.Args() + if l := len(args); l < 1 || l > 2 { + c.Ui.Error(c.Help()) + return 1 + } + + constructor := args[0] + var payload []byte + var readErr error + + // Read the input + if len(args) == 2 { + switch args[1] { + case "-": + payload, readErr = ioutil.ReadAll(os.Stdin) + default: + payload, readErr = ioutil.ReadFile(args[1]) + } + if readErr != nil { + c.Ui.Error(fmt.Sprintf("Error reading input data: %v", readErr)) + return 1 + } + } + + // Build the meta + metaMap := make(map[string]string, len(meta)) + for _, m := range meta { + split := strings.SplitN(m, "=", 2) + if len(split) != 2 { + c.Ui.Error(fmt.Sprintf("Error parsing meta value: %v", m)) + return 1 + } + + metaMap[split[0]] = split[1] + } + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + // Dispatch the job + resp, _, err := client.Jobs().Dispatch(constructor, metaMap, payload, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to dispatch job: %s", err)) + return 1 + } + + basic := []string{ + fmt.Sprintf("Dispatched Job ID|%s", resp.DispatchedJobID), + fmt.Sprintf("Evaluation ID|%s", limit(resp.EvalID, length)), + } + c.Ui.Output(formatKV(basic)) + + if detach { + return 0 + } + + c.Ui.Output("") + mon := newMonitor(c.Ui, client, length) + return 
mon.monitor(resp.EvalID, false) +} diff --git a/command/job_dispatch_test.go b/command/job_dispatch_test.go new file mode 100644 index 000000000000..b8e80a6aa30c --- /dev/null +++ b/command/job_dispatch_test.go @@ -0,0 +1,43 @@ +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestJobDispatchCommand_Implements(t *testing.T) { + var _ cli.Command = &JobDispatchCommand{} +} + +func TestJobDispatchCommand_Fails(t *testing.T) { + ui := new(cli.MockUi) + cmd := &JobDispatchCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 { + t.Fatalf("expected exit code 1, got: %d", code) + } + if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) { + t.Fatalf("expected help output, got: %s", out) + } + ui.ErrorWriter.Reset() + + // Fails when specified file does not exist + if code := cmd.Run([]string{"foo", "/unicorns/leprechauns"}); code != 1 { + t.Fatalf("expected exit code 1, got: %d", code) + } + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error reading input data") { + t.Fatalf("expected error reading input data, got: %s", out) + } + ui.ErrorWriter.Reset() + + if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 { + t.Fatalf("expected exit code 1, got: %d", code) + } + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Failed to dispatch") { + t.Fatalf("expected failed query error, got: %s", out) + } + ui.ErrorWriter.Reset() +} diff --git a/command/run.go b/command/run.go index e2e4f0f3a68b..9d7d33765dff 100644 --- a/command/run.go +++ b/command/run.go @@ -147,8 +147,9 @@ func (c *RunCommand) Run(args []string) int { return 1 } - // Check if the job is periodic. + // Check if the job is periodic or is a constructor job periodic := job.IsPeriodic() + constructor := job.IsConstructor() // Parse the Vault token if vaultToken == "" { @@ -239,14 +240,14 @@ } // Check if we should enter monitor mode - if detach || periodic { + if detach || periodic || constructor { c.Ui.Output("Job registration successful") if periodic { now := time.Now().UTC() next := job.Periodic.Next(now) c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)", formatTime(next), formatTimeDifference(now, next, time.Second))) - } else { + } else if !constructor { c.Ui.Output("Evaluation ID: " + evalID) } diff --git a/command/status.go b/command/status.go index 8b94997a5859..72fe5ec7b491 100644 --- a/command/status.go +++ b/command/status.go @@ -127,13 +127,14 @@ func (c *StatusCommand) Run(args []string) int { return 1 } - // Check if it is periodic + // Check if it is periodic or a constructor job sJob, err := convertApiJob(job) if err != nil { c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) return 1 } periodic := sJob.IsPeriodic() + constructor := sJob.IsConstructor() // Format the job info basic := []string{ @@ -144,6 +145,7 @@ fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")), fmt.Sprintf("Status|%s", job.Status), fmt.Sprintf("Periodic|%v", periodic), + fmt.Sprintf("Constructor|%v", constructor), } if periodic { @@ -167,13 +169,16 @@ c.Ui.Error(err.Error()) return 1 } - - return 0 - } - - if err := c.outputJobInfo(client, job); err != nil { - c.Ui.Error(err.Error()) - return 1 + } else if constructor { + if err := c.outputConstructorInfo(client, job); err != nil { + c.Ui.Error(err.Error()) + return 1 + } + } else 
{ + if err := c.outputJobInfo(client, job); err != nil { + c.Ui.Error(err.Error()) + return 1 + } } return 0 @@ -182,6 +187,11 @@ // outputPeriodicInfo prints information about the passed periodic job. If a // request fails, an error is returned. func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) error { + // Output the summary + if err := c.outputJobSummary(client, job); err != nil { + return err + } + // Generate the prefix that matches launched jobs from the periodic job. prefix := fmt.Sprintf("%s%s", job.ID, structs.PeriodicLaunchSuffix) children, _, err := client.Jobs().PrefixList(prefix) @@ -208,7 +218,55 @@ child.Status)) } - c.Ui.Output(fmt.Sprintf("\nPreviously launched jobs:\n%s", formatList(out))) + c.Ui.Output(c.Colorize().Color("\n[bold]Previously Launched Jobs[reset]")) + c.Ui.Output(formatList(out)) return nil } + +// outputConstructorInfo prints information about the passed constructor job. If a +// request fails, an error is returned. +func (c *StatusCommand) outputConstructorInfo(client *api.Client, job *api.Job) error { + // Output constructor details + c.Ui.Output(c.Colorize().Color("\n[bold]Constructor[reset]")) + constructor := make([]string, 3) + constructor[0] = fmt.Sprintf("Payload|%s", job.Constructor.Payload) + constructor[1] = fmt.Sprintf("Required Metadata|%v", strings.Join(job.Constructor.MetaRequired, ", ")) + constructor[2] = fmt.Sprintf("Optional Metadata|%v", strings.Join(job.Constructor.MetaOptional, ", ")) + c.Ui.Output(formatKV(constructor)) + + // Output the summary + if err := c.outputJobSummary(client, job); err != nil { + return err + } + + // Generate the prefix that matches launched jobs from the constructor job. + prefix := fmt.Sprintf("%s%s", job.ID, structs.DispatchLaunchSuffic) + children, _, err := client.Jobs().PrefixList(prefix) + if err != nil { + return fmt.Errorf("Error querying job: %s", err) + } + + if len(children) == 0 { + c.Ui.Output("\nNo dispatched instances of constructor job found") + return nil + } + + out := make([]string, 1) + out[0] = "ID|Status" + for _, child := range children { + // Ensure that we are only showing jobs whose parent is the requested + // job. 
+ if child.ParentID != job.ID { + continue + } + + out = append(out, fmt.Sprintf("%s|%s", + child.ID, + child.Status)) + } + + c.Ui.Output(c.Colorize().Color("\n[bold]Dispatched Jobs[reset]")) + c.Ui.Output(formatList(out)) return nil } @@ -229,31 +287,9 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { return fmt.Errorf("Error querying job evaluations: %s", err) } - // Query the summary - summary, _, err := client.Jobs().Summary(job.ID, nil) - if err != nil { - return fmt.Errorf("Error querying job summary: %s", err) - } - - // Format the summary - c.Ui.Output(c.Colorize().Color("\n[bold]Summary[reset]")) - if summary != nil { - summaries := make([]string, len(summary.Summary)+1) - summaries[0] = "Task Group|Queued|Starting|Running|Failed|Complete|Lost" - taskGroups := make([]string, 0, len(summary.Summary)) - for taskGroup := range summary.Summary { - taskGroups = append(taskGroups, taskGroup) - } - sort.Strings(taskGroups) - for idx, taskGroup := range taskGroups { - tgs := summary.Summary[taskGroup] - summaries[idx+1] = fmt.Sprintf("%s|%d|%d|%d|%d|%d|%d", - taskGroup, tgs.Queued, tgs.Starting, - tgs.Running, tgs.Failed, - tgs.Complete, tgs.Lost, - ) - } - c.Ui.Output(formatList(summaries)) + // Output the summary + if err := c.outputJobSummary(client, job); err != nil { + return err } // Determine latest evaluation with failures whose follow up hasn't @@ -320,6 +356,66 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { return nil } +// outputJobSummary displays the given jobs summary and children job summary +// where appropriate +func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error { + // Query the summary + summary, _, err := client.Jobs().Summary(job.ID, nil) + if err != nil { + return fmt.Errorf("Error querying job summary: %s", err) + } + + if summary == nil { + return nil + } + + sJob, err := convertApiJob(job) + if err != nil { + return fmt.Errorf("Error converting job: %s", err) + } + + periodic := sJob.IsPeriodic() + constructor := sJob.IsConstructor() + + // Print the summary + if !periodic && !constructor { + c.Ui.Output(c.Colorize().Color("\n[bold]Summary[reset]")) + summaries := make([]string, len(summary.Summary)+1) + summaries[0] = "Task Group|Queued|Starting|Running|Failed|Complete|Lost" + taskGroups := make([]string, 0, len(summary.Summary)) + for taskGroup := range summary.Summary { + taskGroups = append(taskGroups, taskGroup) + } + sort.Strings(taskGroups) + for idx, taskGroup := range taskGroups { + tgs := summary.Summary[taskGroup] + summaries[idx+1] = fmt.Sprintf("%s|%d|%d|%d|%d|%d|%d", + taskGroup, tgs.Queued, tgs.Starting, + tgs.Running, tgs.Failed, + tgs.Complete, tgs.Lost, + ) + } + c.Ui.Output(formatList(summaries)) + } + + // Always display the summary if we are periodic or a constructor job + // but only display if the summary is non-zero on normal jobs + if summary.Children != nil && (constructor || periodic || summary.Children.Sum() > 0) { + if constructor { + c.Ui.Output(c.Colorize().Color("\n[bold]Dispatched Job Summary[reset]")) + } else { + c.Ui.Output(c.Colorize().Color("\n[bold]Children Job Summary[reset]")) + } + summaries := make([]string, 2) + summaries[0] = "Pending|Running|Dead" + summaries[1] = fmt.Sprintf("%d|%d|%d", + summary.Children.Pending, summary.Children.Running, summary.Children.Dead) + c.Ui.Output(formatList(summaries)) + } + + return nil +} + func (c *StatusCommand) outputFailedPlacements(failedEval *api.Evaluation) { if failedEval == nil || 
len(failedEval.FailedTGAllocs) == 0 { return diff --git a/commands.go b/commands.go index 36f8e92c5f7c..08bad16eaeef 100644 --- a/commands.go +++ b/commands.go @@ -89,6 +89,16 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory { Meta: meta, }, nil }, + "job": func() (cli.Command, error) { + return &command.JobCommand{ + Meta: meta, + }, nil + }, + "job dispatch": func() (cli.Command, error) { + return &command.JobDispatchCommand{ + Meta: meta, + }, nil + }, "logs": func() (cli.Command, error) { return &command.LogsCommand{ Meta: meta, diff --git a/jobspec/parse.go b/jobspec/parse.go index f9defa7cf437..0ac0ae0b40a6 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -102,6 +102,7 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { delete(m, "update") delete(m, "periodic") delete(m, "vault") + delete(m, "constructor") // Set the ID and name to the object key result.ID = obj.Keys[0].Token.Value().(string) @@ -127,19 +128,20 @@ // Check for invalid keys valid := []string{ + "all_at_once", + "constraint", + "constructor", + "datacenters", + "group", "id", + "meta", "name", + "periodic", + "priority", "region", - "all_at_once", + "task", "type", - "priority", - "datacenters", - "constraint", "update", - "periodic", - "meta", - "task", - "group", "vault", "vault_token", } @@ -168,6 +170,13 @@ } } + // If we have a constructor definition, then parse that + if o := listVal.Filter("constructor"); len(o.Items) > 0 { + if err := parseConstructor(&result.Constructor, o); err != nil { + return multierror.Prefix(err, "constructor ->") + } + } + // Parse out meta fields. These are in HCL as a list so we need // to iterate over them and merge them. if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 { @@ -552,6 +561,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l "artifact", "config", "constraint", + "dispatch_input", "driver", "env", "kill_timeout", @@ -574,6 +584,7 @@ delete(m, "artifact") delete(m, "config") delete(m, "constraint") + delete(m, "dispatch_input") delete(m, "env") delete(m, "logs") delete(m, "meta") @@ -733,6 +744,32 @@ t.Vault = v } + // If we have a dispatch_input block, parse that + if o := listVal.Filter("dispatch_input"); len(o.Items) > 0 { + if len(o.Items) > 1 { + return fmt.Errorf("only one dispatch_input block is allowed in a task. 
Number of dispatch_input blocks found: %d", len(o.Items)) + } + var m map[string]interface{} + dispatchBlock := o.Items[0] + + // Check for invalid keys + valid := []string{ + "file", + } + if err := checkHCLKeys(dispatchBlock.Val, valid); err != nil { + return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_input ->", n)) + } + + if err := hcl.DecodeObject(&m, dispatchBlock.Val); err != nil { + return err + } + + t.DispatchInput = &structs.DispatchInputConfig{} + if err := mapstructure.WeakDecode(m, t.DispatchInput); err != nil { + return err + } + } + *result = append(*result, &t) } @@ -1205,6 +1242,72 @@ func parseVault(result *structs.Vault, list *ast.ObjectList) error { return nil } +func parseConstructor(result **structs.ConstructorConfig, list *ast.ObjectList) error { + list = list.Elem() + if len(list.Items) > 1 { + return fmt.Errorf("only one 'constructor' block allowed per job") + } + + // Get our resource object + o := list.Items[0] + + var m map[string]interface{} + if err := hcl.DecodeObject(&m, o.Val); err != nil { + return err + } + + delete(m, "meta_keys") + + // Check for invalid keys + valid := []string{ + "payload", + "meta_keys", + } + if err := checkHCLKeys(o.Val, valid); err != nil { + return err + } + + // Build the constructor block + var d structs.ConstructorConfig + if err := mapstructure.WeakDecode(m, &d); err != nil { + return err + } + + var listVal *ast.ObjectList + if ot, ok := o.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return fmt.Errorf("constructor block should be an object") + } + + // Parse the meta_keys block + if metaList := listVal.Filter("meta_keys"); len(metaList.Items) > 0 { + // Get our resource object + o := metaList.Items[0] + + var m map[string]interface{} + if err := hcl.DecodeObject(&m, o.Val); err != nil { + return err + } + + // Check for invalid keys + valid := []string{ + "optional", + "required", + } + if err := checkHCLKeys(o.Val, valid); err != nil { + return err + } + + if err := mapstructure.WeakDecode(m, &d); err != nil { + return err + } + } + + *result = &d + return nil +} + func checkHCLKeys(node ast.Node, valid []string) error { var list *ast.ObjectList switch n := node.(type) { diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 1e2a282a0f97..192203697615 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -537,6 +537,50 @@ func TestParse(t *testing.T) { }, false, }, + + { + "constructor.hcl", + &structs.Job{ + ID: "constructor", + Name: "constructor", + Type: "service", + Priority: 50, + Region: "global", + + Constructor: &structs.ConstructorConfig{ + Payload: "required", + MetaRequired: []string{"foo", "bar"}, + MetaOptional: []string{"baz", "bam"}, + }, + + TaskGroups: []*structs.TaskGroup{ + &structs.TaskGroup{ + Name: "foo", + Count: 1, + EphemeralDisk: structs.DefaultEphemeralDisk(), + Tasks: []*structs.Task{ + &structs.Task{ + Name: "bar", + Driver: "docker", + Resources: &structs.Resources{ + CPU: 100, + MemoryMB: 10, + IOPS: 0, + }, + LogConfig: &structs.LogConfig{ + MaxFiles: 10, + MaxFileSizeMB: 10, + }, + DispatchInput: &structs.DispatchInputConfig{ + File: "foo/bar", + }, + }, + }, + }, + }, + }, + false, + }, } for _, tc := range cases { diff --git a/jobspec/test-fixtures/constructor.hcl b/jobspec/test-fixtures/constructor.hcl new file mode 100644 index 000000000000..1bb4fcf04506 --- /dev/null +++ b/jobspec/test-fixtures/constructor.hcl @@ -0,0 +1,19 @@ +job "constructor" { + constructor { + payload = "required" + meta_keys { + required = ["foo", "bar"] + optional = ["baz", 
"bam"] + } + } + group "foo" { + task "bar" { + driver = "docker" + resources {} + + dispatch_input { + file = "foo/bar" + } + } + } +} diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 2e28341d6972..3db0489d595b 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -7,6 +7,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/golang/snappy" "github.com/hashicorp/consul/lib" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" @@ -20,6 +21,10 @@ const ( // RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by // enforcing the job modify index during registers. RegisterEnforceIndexErrPrefix = "Enforcing job modify index" + + // DispatchPayloadSizeLimit is the maximum size of the uncompressed input + // data payload. + DispatchPayloadSizeLimit = 16 * 1024 ) var ( @@ -133,8 +138,8 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis // Populate the reply with job information reply.JobModifyIndex = index - // If the job is periodic, we don't create an eval. - if args.Job.IsPeriodic() { + // If the job is periodic or a constructor, we don't create an eval. + if args.Job.IsPeriodic() || args.Job.IsConstructor() { return nil } @@ -311,6 +316,8 @@ func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegis if job.IsPeriodic() { return fmt.Errorf("can't evaluate periodic job") + } else if job.IsConstructor() { + return fmt.Errorf("can't evaluate constructor job") } // Create a new evaluation @@ -375,8 +382,8 @@ func (j *Job) Deregister(args *structs.JobDeregisterRequest, reply *structs.JobD // Populate the reply with job information reply.JobModifyIndex = index - // If the job is periodic, we don't create an eval. - if job != nil && job.IsPeriodic() { + // If the job is periodic or a construcotr, we don't create an eval. + if job != nil && (job.IsPeriodic() || job.IsConstructor()) { return nil } @@ -766,5 +773,169 @@ func validateJob(job *structs.Job) error { multierror.Append(validationErrors, fmt.Errorf("job type cannot be core")) } + if len(job.Payload) != 0 { + multierror.Append(validationErrors, fmt.Errorf("job can't be submitted with a payload, only dispatched")) + } + return validationErrors.ErrorOrNil() } + +// Dispatch is used to dispatch a job based on a constructor job. 
+func (j *Job) Dispatch(args *structs.JobDispatchRequest, reply *structs.JobDispatchResponse) error { + if done, err := j.srv.forward("Job.Dispatch", args, args, reply); done { + return err + } + defer metrics.MeasureSince([]string{"nomad", "job", "dispatch"}, time.Now()) + + // Lookup the job + if args.JobID == "" { + return fmt.Errorf("missing constructor job ID") + } + + snap, err := j.srv.fsm.State().Snapshot() + if err != nil { + return err + } + constructor, err := snap.JobByID(args.JobID) + if err != nil { + return err + } + if constructor == nil { + return fmt.Errorf("constructor job not found") + } + + if !constructor.IsConstructor() { + return fmt.Errorf("Specified job %q is not a constructor job", args.JobID) + } + + // Validate the arguments + if err := validateDispatchRequest(args, constructor); err != nil { + return err + } + + // Derive the child job and commit it via Raft + dispatchJob := constructor.Copy() + dispatchJob.Constructor = nil + dispatchJob.ID = structs.DispatchedID(constructor.ID, time.Now()) + dispatchJob.ParentID = constructor.ID + dispatchJob.Name = dispatchJob.ID + + // Merge in the meta data + for k, v := range args.Meta { + if dispatchJob.Meta == nil { + dispatchJob.Meta = make(map[string]string, len(args.Meta)) + } + dispatchJob.Meta[k] = v + } + + // Compress the payload + dispatchJob.Payload = snappy.Encode(nil, args.Payload) + + regReq := &structs.JobRegisterRequest{ + Job: dispatchJob, + WriteRequest: args.WriteRequest, + } + + // Commit this update via Raft + _, jobCreateIndex, err := j.srv.raftApply(structs.JobRegisterRequestType, regReq) + if err != nil { + j.srv.logger.Printf("[ERR] nomad.job: Dispatched job register failed: %v", err) + return err + } + + // Create a new evaluation + eval := &structs.Evaluation{ + ID: structs.GenerateUUID(), + Priority: dispatchJob.Priority, + Type: dispatchJob.Type, + TriggeredBy: structs.EvalTriggerJobRegister, + JobID: dispatchJob.ID, + JobModifyIndex: jobCreateIndex, + Status: structs.EvalStatusPending, + } + update := &structs.EvalUpdateRequest{ + Evals: []*structs.Evaluation{eval}, + WriteRequest: structs.WriteRequest{Region: args.Region}, + } + + // Commit this evaluation via Raft + _, evalIndex, err := j.srv.raftApply(structs.EvalUpdateRequestType, update) + if err != nil { + j.srv.logger.Printf("[ERR] nomad.job: Eval create failed: %v", err) + return err + } + + // Setup the reply + reply.EvalID = eval.ID + reply.EvalCreateIndex = evalIndex + reply.JobCreateIndex = jobCreateIndex + reply.DispatchedJobID = dispatchJob.ID + reply.Index = evalIndex + return nil +} + +// validateDispatchRequest returns whether the request is valid given the +// job's constructor +func validateDispatchRequest(req *structs.JobDispatchRequest, job *structs.Job) error { + // Check the payload constraint is met + hasInputData := len(req.Payload) != 0 + if job.Constructor.Payload == structs.DispatchPayloadRequired && !hasInputData { + return fmt.Errorf("Payload is not provided but required by constructor") + } else if job.Constructor.Payload == structs.DispatchPayloadForbidden && hasInputData { + return fmt.Errorf("Payload provided but forbidden by constructor") + } + + // Check the payload doesn't exceed the size limit + if l := len(req.Payload); l > DispatchPayloadSizeLimit { + return fmt.Errorf("Payload exceeds maximum size; %d > %d", l, DispatchPayloadSizeLimit) + } + + // Check if the metadata is a set + keys := make(map[string]struct{}, len(req.Meta)) + for k := range req.Meta { + if _, ok := keys[k]; ok { + return 
fmt.Errorf("Duplicate key %q in passed metadata", k) + } + keys[k] = struct{}{} + } + + required := structs.SliceStringToSet(job.Constructor.MetaRequired) + optional := structs.SliceStringToSet(job.Constructor.MetaOptional) + + // Check the metadata key constraints are met + unpermitted := make(map[string]struct{}) + for k := range req.Meta { + _, req := required[k] + _, opt := optional[k] + if !req && !opt { + unpermitted[k] = struct{}{} + } + } + + if len(unpermitted) != 0 { + flat := make([]string, 0, len(unpermitted)) + for k := range unpermitted { + flat = append(flat, k) + } + + return fmt.Errorf("Dispatch request included unpermitted metadata keys: %v", flat) + } + + missing := make(map[string]struct{}) + for _, k := range job.Constructor.MetaRequired { + if _, ok := req.Meta[k]; !ok { + missing[k] = struct{}{} + } + } + + if len(missing) != 0 { + flat := make([]string, 0, len(missing)) + for k := range missing { + flat = append(flat, k) + } + + return fmt.Errorf("Dispatch did not provide required meta keys: %v", flat) + } + + return nil +} diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 3405fa101001..ed8f1443a351 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -116,6 +116,35 @@ func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) { } } +func TestJobEndpoint_Register_Payload(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create the register request with a job containing an invalid driver + // config + job := mock.Job() + job.Payload = []byte{0x1} + req := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp) + if err == nil { + t.Fatalf("expected a validation error") + } + + if !strings.Contains(err.Error(), "payload") { + t.Fatalf("expected a payload error but got: %v", err) + } +} + func TestJobEndpoint_Register_Existing(t *testing.T) { s1 := testServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -249,6 +278,49 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) { } } +func TestJobEndpoint_Register_Constructor(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create the register request for a constructor job. 
+ job := mock.Job() + job.Type = structs.JobTypeBatch + job.Constructor = &structs.ConstructorConfig{} + req := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if resp.JobModifyIndex == 0 { + t.Fatalf("bad index: %d", resp.Index) + } + + // Check for the job in the FSM + state := s1.fsm.State() + out, err := state.JobByID(job.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("expected job") + } + if out.CreateIndex != resp.JobModifyIndex { + t.Fatalf("index mis-match") + } + if resp.EvalID != "" { + t.Fatalf("Register created an eval for a constructor job") + } +} + func TestJobEndpoint_Register_EnforceIndex(t *testing.T) { s1 := testServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -714,6 +786,44 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) { } } +func TestJobEndpoint_Evaluate_Constructor(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create the register request + job := mock.Job() + job.Type = structs.JobTypeBatch + job.Constructor = &structs.ConstructorConfig{} + req := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if resp.JobModifyIndex == 0 { + t.Fatalf("bad index: %d", resp.Index) + } + + // Force a re-evaluation + reEval := &structs.JobEvaluateRequest{ + JobID: job.ID, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil { + t.Fatal("expect an err") + } +} + func TestJobEndpoint_Deregister(t *testing.T) { s1 := testServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -894,6 +1004,57 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) { } } +func TestJobEndpoint_Deregister_Constructor(t *testing.T) { + s1 := testServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer s1.Shutdown() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create the register request + job := mock.Job() + job.Type = structs.JobTypeBatch + job.Constructor = &structs.ConstructorConfig{} + reg := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + + // Fetch the response + var resp structs.JobRegisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + // Deregister + dereg := &structs.JobDeregisterRequest{ + JobID: job.ID, + WriteRequest: structs.WriteRequest{Region: "global"}, + } + var resp2 structs.JobDeregisterResponse + if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil { + t.Fatalf("err: %v", err) + } + if resp2.JobModifyIndex == 0 { + t.Fatalf("bad index: %d", resp2.Index) + } + + // Check for the node in the FSM + state := s1.fsm.State() + out, err := state.JobByID(job.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + 
t.Fatalf("unexpected job") + } + + if resp.EvalID != "" { + t.Fatalf("Deregister created an eval for a constructor job") + } +} + func TestJobEndpoint_GetJob(t *testing.T) { s1 := testServer(t, nil) defer s1.Shutdown() @@ -1003,6 +1164,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) { Summary: map[string]structs.TaskGroupSummary{ "web": structs.TaskGroupSummary{}, }, + Children: new(structs.JobChildrenSummary), CreateIndex: job.CreateIndex, ModifyIndex: job.CreateIndex, } @@ -1725,3 +1887,229 @@ func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) { t.Fatalf("Expected signal feasibility error; got %v", err) } } + +func TestJobEndpoint_Dispatch(t *testing.T) { + + // No requirements + d1 := mock.Job() + d1.Type = structs.JobTypeBatch + d1.Constructor = &structs.ConstructorConfig{} + + // Require input data + d2 := mock.Job() + d2.Type = structs.JobTypeBatch + d2.Constructor = &structs.ConstructorConfig{ + Payload: structs.DispatchPayloadRequired, + } + + // Disallow input data + d3 := mock.Job() + d3.Type = structs.JobTypeBatch + d3.Constructor = &structs.ConstructorConfig{ + Payload: structs.DispatchPayloadForbidden, + } + + // Require meta + d4 := mock.Job() + d4.Type = structs.JobTypeBatch + d4.Constructor = &structs.ConstructorConfig{ + MetaRequired: []string{"foo", "bar"}, + } + + // Optional meta + d5 := mock.Job() + d5.Type = structs.JobTypeBatch + d5.Constructor = &structs.ConstructorConfig{ + MetaOptional: []string{"foo", "bar"}, + } + + reqNoInputNoMeta := &structs.JobDispatchRequest{} + reqInputDataNoMeta := &structs.JobDispatchRequest{ + Payload: []byte("hello world"), + } + reqNoInputDataMeta := &structs.JobDispatchRequest{ + Meta: map[string]string{ + "foo": "f1", + "bar": "f2", + }, + } + reqInputDataMeta := &structs.JobDispatchRequest{ + Payload: []byte("hello world"), + Meta: map[string]string{ + "foo": "f1", + "bar": "f2", + }, + } + reqBadMeta := &structs.JobDispatchRequest{ + Payload: []byte("hello world"), + Meta: map[string]string{ + "foo": "f1", + "bar": "f2", + "baz": "f3", + }, + } + reqInputDataTooLarge := &structs.JobDispatchRequest{ + Payload: make([]byte, DispatchPayloadSizeLimit+100), + } + + type testCase struct { + name string + constructor *structs.Job + dispatchReq *structs.JobDispatchRequest + err bool + errStr string + } + cases := []testCase{ + { + name: "optional input data w/ data", + constructor: d1, + dispatchReq: reqInputDataNoMeta, + err: false, + }, + { + name: "optional input data w/o data", + constructor: d1, + dispatchReq: reqNoInputNoMeta, + err: false, + }, + { + name: "require input data w/ data", + constructor: d2, + dispatchReq: reqInputDataNoMeta, + err: false, + }, + { + name: "require input data w/o data", + constructor: d2, + dispatchReq: reqNoInputNoMeta, + err: true, + errStr: "not provided but required", + }, + { + name: "disallow input data w/o data", + constructor: d3, + dispatchReq: reqNoInputNoMeta, + err: false, + }, + { + name: "disallow input data w/ data", + constructor: d3, + dispatchReq: reqInputDataNoMeta, + err: true, + errStr: "provided but forbidden", + }, + { + name: "require meta w/ meta", + constructor: d4, + dispatchReq: reqInputDataMeta, + err: false, + }, + { + name: "require meta w/o meta", + constructor: d4, + dispatchReq: reqNoInputNoMeta, + err: true, + errStr: "did not provide required meta keys", + }, + { + name: "optional meta w/ meta", + constructor: d5, + dispatchReq: reqNoInputDataMeta, + err: false, + }, + { + name: "optional meta w/o meta", + constructor: d5, + dispatchReq: 
reqNoInputNoMeta,
+			err:         false,
+		},
+		{
+			name:        "optional meta w/ bad meta",
+			constructor: d5,
+			dispatchReq: reqBadMeta,
+			err:         true,
+			errStr:      "unpermitted metadata keys",
+		},
+		{
+			name:        "optional input w/ too big of input",
+			constructor: d1,
+			dispatchReq: reqInputDataTooLarge,
+			err:         true,
+			errStr:      "Payload exceeds maximum size",
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			s1 := testServer(t, func(c *Config) {
+				c.NumSchedulers = 0 // Prevent automatic dequeue
+			})
+			defer s1.Shutdown()
+			codec := rpcClient(t, s1)
+			testutil.WaitForLeader(t, s1.RPC)
+
+			// Create the register request
+			regReq := &structs.JobRegisterRequest{
+				Job:          tc.constructor,
+				WriteRequest: structs.WriteRequest{Region: "global"},
+			}
+
+			// Fetch the response
+			var regResp structs.JobRegisterResponse
+			if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
+				t.Fatalf("err: %v", err)
+			}
+
+			// Now try to dispatch
+			tc.dispatchReq.JobID = tc.constructor.ID
+			tc.dispatchReq.WriteRequest = structs.WriteRequest{Region: "global"}
+
+			var dispatchResp structs.JobDispatchResponse
+			dispatchErr := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &dispatchResp)
+
+			if dispatchErr == nil {
+				if tc.err {
+					t.Fatalf("Expected error, got none")
+				}
+
+				// Check that we got an eval and job id back
+				if dispatchResp.EvalID == "" || dispatchResp.DispatchedJobID == "" {
+					t.Fatalf("Bad response")
+				}
+
+				state := s1.fsm.State()
+				out, err := state.JobByID(dispatchResp.DispatchedJobID)
+				if err != nil {
+					t.Fatalf("err: %v", err)
+				}
+				if out == nil {
+					t.Fatalf("expected job")
+				}
+				if out.CreateIndex != dispatchResp.JobCreateIndex {
+					t.Fatalf("index mis-match")
+				}
+				if out.ParentID != tc.constructor.ID {
+					t.Fatalf("bad parent ID")
+				}
+
+				// Lookup the evaluation
+				eval, err := state.EvalByID(dispatchResp.EvalID)
+				if err != nil {
+					t.Fatalf("err: %v", err)
+				}
+				if eval == nil {
+					t.Fatalf("expected eval")
+				}
+				if eval.CreateIndex != dispatchResp.EvalCreateIndex {
+					t.Fatalf("index mis-match")
+				}
+			} else {
+				if !tc.err {
+					t.Fatalf("Got unexpected error: %v", dispatchErr)
+				} else if !strings.Contains(dispatchErr.Error(), tc.errStr) {
+					t.Fatalf("Expected err to include %q; got %v", tc.errStr, dispatchErr)
+				}
+			}
+		})
+	}
+}
diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go
index dd64cd2f0b24..8214c8ca9bcc 100644
--- a/nomad/mock/mock.go
+++ b/nomad/mock/mock.go
@@ -211,6 +211,7 @@ func SystemJob() *structs.Job {
 		CreateIndex: 42,
 		ModifyIndex: 99,
 	}
+	job.Canonicalize()
 	return job
 }
 
@@ -222,6 +223,7 @@ func PeriodicJob() *structs.Job {
 		SpecType: structs.PeriodicSpecCron,
 		Spec:     "*/30 * * * *",
 	}
+	job.Status = structs.JobStatusRunning
 	return job
 }
diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go
index d1882824b171..be114acab4fb 100644
--- a/nomad/node_endpoint_test.go
+++ b/nomad/node_endpoint_test.go
@@ -731,6 +731,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
 				Lost: 1,
 			},
 		},
+		Children:    new(structs.JobChildrenSummary),
 		CreateIndex: jobResp.JobModifyIndex,
 		ModifyIndex: summary.ModifyIndex,
 	}
@@ -749,6 +750,7 @@
 				Lost: 1,
 			},
 		},
+		Children:    new(structs.JobChildrenSummary),
 		CreateIndex: jobResp1.JobModifyIndex,
 		ModifyIndex: summary1.ModifyIndex,
 	}
diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go
index 9b9b1da17bde..28a0a16bcfc9 100644
--- a/nomad/state/state_store.go
+++ b/nomad/state/state_store.go
@@ -344,12 +344,17 @@ func (s
*StateStore) UpsertJob(index uint64, job *structs.Job) error { job.ModifyIndex = index job.JobModifyIndex = index - // If we are inserting the job for the first time, we don't need to - // calculate the jobs status as it is known. - if job.IsPeriodic() { - job.Status = structs.JobStatusRunning - } else { - job.Status = structs.JobStatusPending + if err := s.setJobStatus(index, watcher, txn, job, false, ""); err != nil { + return fmt.Errorf("setting job status for %q failed: %v", job.ID, err) + } + + // Have to get the job again since it could have been updated + updated, err := txn.First("jobs", "id", job.ID) + if err != nil { + return fmt.Errorf("job lookup failed: %v", err) + } + if updated != nil { + job = updated.(*structs.Job) } } @@ -394,7 +399,48 @@ func (s *StateStore) DeleteJob(index uint64, jobID string) error { watcher.Add(watch.Item{Table: "job_summary"}) watcher.Add(watch.Item{JobSummary: jobID}) - // Delete the node + // Check if we should update a parent job summary + job := existing.(*structs.Job) + if job.ParentID != "" { + summaryRaw, err := txn.First("job_summary", "id", job.ParentID) + if err != nil { + return fmt.Errorf("unable to retrieve summary for parent job: %v", err) + } + + // Only continue if the summary exists. It could not exist if the parent + // job was removed + if summaryRaw != nil { + existing := summaryRaw.(structs.JobSummary) + pSummary := existing.Copy() + if pSummary.Children != nil { + + switch job.Status { + case structs.JobStatusPending: + pSummary.Children.Pending-- + pSummary.Children.Dead++ + case structs.JobStatusRunning: + pSummary.Children.Running-- + pSummary.Children.Dead++ + case structs.JobStatusDead: + default: + return fmt.Errorf("unknown old job status %q", job.Status) + } + + watcher.Add(watch.Item{Table: "job_summary"}) + watcher.Add(watch.Item{JobSummary: job.ParentID}) + + // Insert the summary + if err := txn.Insert("job_summary", *pSummary); err != nil { + return fmt.Errorf("job summary insert failed: %v", err) + } + if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + } + } + } + + // Delete the job if err := txn.Delete("jobs", existing); err != nil { return fmt.Errorf("job delete failed: %v", err) } @@ -881,6 +927,7 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, watcher watch.I return nil } exist := existing.(*structs.Allocation) + // Trigger the watcher watcher.Add(watch.Item{Alloc: alloc.ID}) watcher.Add(watch.Item{AllocEval: exist.EvalID}) @@ -1422,6 +1469,9 @@ func (s *StateStore) setJobStatus(index uint64, watcher watch.Items, txn *memdb. // Capture the current status so we can check if there is a change oldStatus := job.Status + if index == job.CreateIndex { + oldStatus = "" + } newStatus := forceStatus // If forceStatus is not set, compute the jobs status. @@ -1454,6 +1504,69 @@ func (s *StateStore) setJobStatus(index uint64, watcher watch.Items, txn *memdb. if err := txn.Insert("index", &IndexEntry{"jobs", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } + + // Update the children summary + if updated.ParentID != "" { + // Try to update the summary of the parent job summary + summaryRaw, err := txn.First("job_summary", "id", updated.ParentID) + if err != nil { + return fmt.Errorf("unable to retrieve summary for parent job: %v", err) + } + + // Only continue if the summary exists. 
It may not exist if the parent
+		// job was removed.
+		if summaryRaw != nil {
+			existing := summaryRaw.(structs.JobSummary)
+			pSummary := existing.Copy()
+			if pSummary.Children == nil {
+				pSummary.Children = new(structs.JobChildrenSummary)
+			}
+
+			// Determine the transition and update the correct fields
+			children := pSummary.Children
+
+			// Decrement old status
+			if oldStatus != "" {
+				switch oldStatus {
+				case structs.JobStatusPending:
+					children.Pending--
+				case structs.JobStatusRunning:
+					children.Running--
+				case structs.JobStatusDead:
+					children.Dead--
+				default:
+					return fmt.Errorf("unknown old job status %q", oldStatus)
+				}
+			}
+
+			// Increment new status
+			switch newStatus {
+			case structs.JobStatusPending:
+				children.Pending++
+			case structs.JobStatusRunning:
+				children.Running++
+			case structs.JobStatusDead:
+				children.Dead++
+			default:
+				return fmt.Errorf("unknown new job status %q", newStatus)
+			}
+
+			// Update the index
+			pSummary.ModifyIndex = index
+
+			watcher.Add(watch.Item{Table: "job_summary"})
+			watcher.Add(watch.Item{JobSummary: updated.ParentID})
+
+			// Insert the summary
+			if err := txn.Insert("job_summary", *pSummary); err != nil {
+				return fmt.Errorf("job summary insert failed: %v", err)
+			}
+			if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
+				return fmt.Errorf("index update failed: %v", err)
+			}
+		}
+	}
+
 	return nil
 }
@@ -1492,9 +1605,9 @@ func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete b
 	}
 
 	// If there are no allocations or evaluations it is a new job. If the job is
-	// periodic, we mark it as running as it will never have an
-	// allocation/evaluation against it.
-	if job.IsPeriodic() {
+	// periodic or is a constructor, we mark it as running as it will never have
+	// an allocation/evaluation against it.
+	if job.IsPeriodic() || job.IsConstructor() {
 		return structs.JobStatusRunning, nil
 	}
 	return structs.JobStatusPending, nil
@@ -1514,6 +1627,7 @@ func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
 		existing = &structs.JobSummary{
 			JobID:       job.ID,
 			Summary:     make(map[string]structs.TaskGroupSummary),
+			Children:    new(structs.JobChildrenSummary),
 			CreateIndex: index,
 		}
 		hasSummaryChanged = true
@@ -1786,6 +1900,8 @@ func (r *StateRestore) PeriodicLaunchRestore(launch *structs.PeriodicLaunch) err
 
 // JobSummaryRestore is used to restore a job summary
 func (r *StateRestore) JobSummaryRestore(jobSummary *structs.JobSummary) error {
+	r.items.Add(watch.Item{Table: "job_summary"})
+	r.items.Add(watch.Item{JobSummary: jobSummary.JobID})
 	if err := r.txn.Insert("job_summary", *jobSummary); err != nil {
 		return fmt.Errorf("job summary insert failed: %v", err)
 	}
diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go
index bc61213a16b7..5c756290b5bf 100644
--- a/nomad/state/state_store_test.go
+++ b/nomad/state/state_store_test.go
@@ -467,6 +467,47 @@ func TestStateStore_UpsertJob_NoEphemeralDisk(t *testing.T) {
 	}
 }
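As an aside before the child-job tests: the parent-summary bookkeeping added to setJobStatus and DeleteJob above reduces to a decrement/increment across three counters. Here is a self-contained sketch of that transition; the helper name applyChildTransition and the lowercase status strings are illustrative only (the real code switches on the structs.JobStatus* constants):

	package main

	import "fmt"

	// childrenSummary stands in for structs.JobChildrenSummary.
	type childrenSummary struct {
		Pending, Running, Dead int64
	}

	// applyChildTransition decrements the bucket for the old status (if any)
	// and increments the bucket for the new one, as setJobStatus does above.
	func applyChildTransition(c *childrenSummary, oldStatus, newStatus string) error {
		bucket := func(status string) (*int64, error) {
			switch status {
			case "pending":
				return &c.Pending, nil
			case "running":
				return &c.Running, nil
			case "dead":
				return &c.Dead, nil
			}
			return nil, fmt.Errorf("unknown job status %q", status)
		}
		if oldStatus != "" {
			b, err := bucket(oldStatus)
			if err != nil {
				return err
			}
			*b--
		}
		b, err := bucket(newStatus)
		if err != nil {
			return err
		}
		*b++
		return nil
	}

	func main() {
		var c childrenSummary
		applyChildTransition(&c, "", "pending")        // child registered
		applyChildTransition(&c, "pending", "running") // child starts running
		applyChildTransition(&c, "running", "dead")    // child completes or is deleted
		fmt.Printf("%+v\n", c)                         // {Pending:0 Running:0 Dead:1}
	}

The tests that follow drive the real implementation through these same transitions.

+// Upsert a job that is the child of a parent job and ensure its summary gets
+// updated.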
+func TestStateStore_UpsertJob_ChildJob(t *testing.T) { + state := testStateStore(t) + parent := mock.Job() + if err := state.UpsertJob(1000, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}) + + err := state.UpsertJob(1001, child) + if err != nil { + t.Fatalf("err: %v", err) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 1 || summary.Children.Running != 0 || summary.Children.Dead != 0 { + t.Fatalf("bad children summary: %v", summary.Children) + } + notify.verify(t) +} + func TestStateStore_DeleteJob_Job(t *testing.T) { state := testStateStore(t) job := mock.Job() @@ -514,6 +555,50 @@ func TestStateStore_DeleteJob_Job(t *testing.T) { notify.verify(t) } +func TestStateStore_DeleteJob_ChildJob(t *testing.T) { + state := testStateStore(t) + + parent := mock.Job() + if err := state.UpsertJob(998, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + + if err := state.UpsertJob(999, child); err != nil { + t.Fatalf("err: %v", err) + } + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}) + + err := state.DeleteJob(1001, child.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { + t.Fatalf("bad children summary: %v", summary.Children) + } + notify.verify(t) +} + func TestStateStore_Jobs(t *testing.T) { state := testStateStore(t) var jobs []*structs.Job @@ -1305,6 +1390,75 @@ func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { notify.verify(t) } +func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { + state := testStateStore(t) + + parent := mock.Job() + if err := state.UpsertJob(998, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + + if err := state.UpsertJob(999, child); err != nil { + t.Fatalf("err: %v", err) + } + + eval := mock.Eval() + eval.Status = structs.EvalStatusComplete + eval.JobID = child.ID + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}, + watch.Item{Table: "evals"}, + watch.Item{Eval: eval.ID}, + watch.Item{EvalJob: eval.JobID}) + + err := state.UpsertEvals(1000, []*structs.Evaluation{eval}) + if err != nil { + t.Fatalf("err: %v", err) + } + + out, err := state.EvalByID(eval.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !reflect.DeepEqual(eval, out) { + t.Fatalf("bad: %#v %#v", eval, out) + } + + index, err := state.Index("evals") + if err != nil { + t.Fatalf("err: %v", err) + } + if index != 1000 { + t.Fatalf("bad: %d", index) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil 
summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { + t.Fatalf("bad children summary: %v", summary.Children) + } + + notify.verify(t) +} + func TestStateStore_DeleteEval_Eval(t *testing.T) { state := testStateStore(t) eval1 := mock.Eval() @@ -1403,6 +1557,66 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { notify.verify(t) } +func TestStateStore_DeleteEval_ChildJob(t *testing.T) { + state := testStateStore(t) + + parent := mock.Job() + if err := state.UpsertJob(998, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + + if err := state.UpsertJob(999, child); err != nil { + t.Fatalf("err: %v", err) + } + + eval1 := mock.Eval() + eval1.JobID = child.ID + alloc1 := mock.Alloc() + alloc1.JobID = child.ID + + err := state.UpsertEvals(1000, []*structs.Evaluation{eval1}) + if err != nil { + t.Fatalf("err: %v", err) + } + + err = state.UpsertAllocs(1001, []*structs.Allocation{alloc1}) + if err != nil { + t.Fatalf("err: %v", err) + } + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}) + + err = state.DeleteEval(1002, []string{eval1.ID}, []string{alloc1.ID}) + if err != nil { + t.Fatalf("err: %v", err) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 0 || summary.Children.Running != 0 || summary.Children.Dead != 1 { + t.Fatalf("bad children summary: %v", summary.Children) + } + + notify.verify(t) +} + func TestStateStore_EvalsByJob(t *testing.T) { state := testStateStore(t) @@ -1572,6 +1786,68 @@ func TestStateStore_RestoreEval(t *testing.T) { func TestStateStore_UpdateAllocsFromClient(t *testing.T) { state := testStateStore(t) + + parent := mock.Job() + if err := state.UpsertJob(998, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + if err := state.UpsertJob(999, child); err != nil { + t.Fatalf("err: %v", err) + } + + alloc := mock.Alloc() + alloc.JobID = child.ID + alloc.Job = child + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}) + + err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create the delta updates + ts := map[string]*structs.TaskState{"web": &structs.TaskState{State: structs.TaskStatePending}} + update := &structs.Allocation{ + ID: alloc.ID, + ClientStatus: structs.AllocClientStatusRunning, + TaskStates: ts, + JobID: alloc.JobID, + TaskGroup: alloc.TaskGroup, + } + err = state.UpdateAllocsFromClient(1001, []*structs.Allocation{update}) + if err != nil { + t.Fatalf("err: %v", err) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 0 || summary.Children.Running != 1 || summary.Children.Dead != 0 { + 
t.Fatalf("bad children summary: %v", summary.Children) + } + + notify.verify(t) +} + +func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { + state := testStateStore(t) alloc := mock.Alloc() alloc2 := mock.Alloc() @@ -1732,6 +2008,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { Starting: 1, }, }, + Children: new(structs.JobChildrenSummary), CreateIndex: 999, ModifyIndex: 1001, } @@ -1824,6 +2101,55 @@ func TestStateStore_UpsertAlloc_NoEphemeralDisk(t *testing.T) { } } +func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { + state := testStateStore(t) + + parent := mock.Job() + if err := state.UpsertJob(998, parent); err != nil { + t.Fatalf("err: %v", err) + } + + child := mock.Job() + child.ParentID = parent.ID + + if err := state.UpsertJob(999, child); err != nil { + t.Fatalf("err: %v", err) + } + + alloc := mock.Alloc() + alloc.JobID = child.ID + alloc.Job = child + + notify := setupNotifyTest( + state, + watch.Item{Table: "job_summary"}, + watch.Item{JobSummary: parent.ID}) + + err := state.UpsertAllocs(1000, []*structs.Allocation{alloc}) + if err != nil { + t.Fatalf("err: %v", err) + } + + summary, err := state.JobSummaryByID(parent.ID) + if err != nil { + t.Fatalf("err: %v", err) + } + if summary == nil { + t.Fatalf("nil summary") + } + if summary.JobID != parent.ID { + t.Fatalf("bad summary id: %v", parent.ID) + } + if summary.Children == nil { + t.Fatalf("nil children summary") + } + if summary.Children.Pending != 0 || summary.Children.Running != 1 || summary.Children.Dead != 0 { + t.Fatalf("bad children summary: %v", summary.Children) + } + + notify.verify(t) +} + func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { state := testStateStore(t) alloc := mock.Alloc() @@ -2031,6 +2357,7 @@ func TestStateStore_JobSummary(t *testing.T) { Running: 1, }, }, + Children: new(structs.JobChildrenSummary), CreateIndex: 900, ModifyIndex: 930, } @@ -2081,6 +2408,7 @@ func TestStateStore_JobSummary(t *testing.T) { Summary: map[string]structs.TaskGroupSummary{ "web": structs.TaskGroupSummary{}, }, + Children: new(structs.JobChildrenSummary), CreateIndex: 1000, ModifyIndex: 1000, } @@ -2214,6 +2542,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { Summary: map[string]structs.TaskGroupSummary{ "web": structs.TaskGroupSummary{}, }, + Children: new(structs.JobChildrenSummary), CreateIndex: 500, ModifyIndex: 500, } @@ -2841,6 +3170,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { Starting: 1, }, }, + Children: new(structs.JobChildrenSummary), CreateIndex: 1000, ModifyIndex: 1001, } @@ -2870,6 +3200,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { Starting: 3, }, }, + Children: new(structs.JobChildrenSummary), CreateIndex: job.CreateIndex, ModifyIndex: outA.ModifyIndex, } @@ -2902,6 +3233,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { Starting: 1, }, }, + Children: new(structs.JobChildrenSummary), CreateIndex: job.CreateIndex, ModifyIndex: outA.ModifyIndex, } diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 719e1b67f1b1..3398f08a3cb8 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -130,6 +130,12 @@ func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { diff.Objects = append(diff.Objects, pDiff) } + // Constructor diff + // Vault diff + if cDiff := constructorDiff(j.Constructor, other.Constructor, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + return diff, nil } @@ -370,6 +376,12 @@ func (t *Task) Diff(other *Task, contextual bool) 
(*TaskDiff, error) { diff.Objects = append(diff.Objects, lDiff) } + // Dispatch Input diff + dDiff := primitiveObjectDiff(t.DispatchInput, other.DispatchInput, nil, "DispatchInput", contextual) + if dDiff != nil { + diff.Objects = append(diff.Objects, dDiff) + } + // Artifacts diff diffs := primitiveObjectSetDiff( interfaceSlice(t.Artifacts), @@ -629,6 +641,43 @@ func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { return diff } +// constructorDiff returns the diff of two constructor objects. If contextual +// diff is enabled, all fields will be returned, even if no diff occurred. +func constructorDiff(old, new *ConstructorConfig, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Constructor"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ConstructorConfig{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ConstructorConfig{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Meta diffs + if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "OptionalMeta", contextual); optionalDiff != nil { + diff.Objects = append(diff.Objects, optionalDiff) + } + + if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "RequiredMeta", contextual); requiredDiff != nil { + diff.Objects = append(diff.Objects, requiredDiff) + } + + return diff +} + // Diff returns a diff of two resource objects. If contextual diff is enabled, // non-changed fields will still be returned. 
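One pattern worth noting in constructorDiff above (shared by the other object diffs in this file): whichever side is nil is swapped for an empty struct before flattening, so the field comparison always has two operands, and the overall diff type is decided before any field is inspected. A stripped-down sketch of just that branch selection, with hypothetical lowercase names that are not part of the patch:

	package main

	import (
		"fmt"
		"reflect"
	)

	// constructorConfig stands in for structs.ConstructorConfig.
	type constructorConfig struct{ Payload string }

	// objectDiffType mirrors the branch selection in constructorDiff above:
	// equal inputs yield no diff at all, and a nil side is treated as an
	// empty value so the result is a pure Added or Deleted diff.
	func objectDiffType(old, new *constructorConfig) string {
		switch {
		case reflect.DeepEqual(old, new):
			return "None" // constructorDiff returns nil in this case
		case old == nil:
			return "Added"
		case new == nil:
			return "Deleted"
		default:
			return "Edited"
		}
	}

	func main() {
		fmt.Println(objectDiffType(nil, &constructorConfig{Payload: "required"})) // Added
		fmt.Println(objectDiffType(&constructorConfig{}, &constructorConfig{}))   // None
	}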
func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index cb186a1b88fd..89b0dac563d1 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -877,6 +877,247 @@ func TestJobDiff(t *testing.T) { }, }, }, + { + // Constructor added + Old: &Job{}, + New: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadRequired, + MetaOptional: []string{"foo"}, + MetaRequired: []string{"bar"}, + }, + }, + Expected: &JobDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeAdded, + Name: "Constructor", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "Payload", + Old: "", + New: DispatchPayloadRequired, + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeAdded, + Name: "OptionalMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "OptionalMeta", + Old: "", + New: "foo", + }, + }, + }, + { + Type: DiffTypeAdded, + Name: "RequiredMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "RequiredMeta", + Old: "", + New: "bar", + }, + }, + }, + }, + }, + }, + }, + }, + { + // Constructor deleted + Old: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadRequired, + MetaOptional: []string{"foo"}, + MetaRequired: []string{"bar"}, + }, + }, + New: &Job{}, + Expected: &JobDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "Constructor", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "Payload", + Old: DispatchPayloadRequired, + New: "", + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "OptionalMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "OptionalMeta", + Old: "foo", + New: "", + }, + }, + }, + { + Type: DiffTypeDeleted, + Name: "RequiredMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "RequiredMeta", + Old: "bar", + New: "", + }, + }, + }, + }, + }, + }, + }, + }, + { + // Constructor edited + Old: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadRequired, + MetaOptional: []string{"foo"}, + MetaRequired: []string{"bar"}, + }, + }, + New: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadOptional, + MetaOptional: []string{"bam"}, + MetaRequired: []string{"bang"}, + }, + }, + Expected: &JobDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "Constructor", + Fields: []*FieldDiff{ + { + Type: DiffTypeEdited, + Name: "Payload", + Old: DispatchPayloadRequired, + New: DispatchPayloadOptional, + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "OptionalMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "OptionalMeta", + Old: "", + New: "bam", + }, + { + Type: DiffTypeDeleted, + Name: "OptionalMeta", + Old: "foo", + New: "", + }, + }, + }, + { + Type: DiffTypeEdited, + Name: "RequiredMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "RequiredMeta", + Old: "", + New: "bang", + }, + { + Type: DiffTypeDeleted, + Name: "RequiredMeta", + Old: "bar", + New: "", + }, + }, + }, + }, + }, + }, + }, + }, + { + // Constructor edited with context + Contextual: true, + Old: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadRequired, + MetaOptional: []string{"foo"}, + MetaRequired: []string{"bar"}, + }, + }, + New: &Job{ + Constructor: &ConstructorConfig{ + Payload: DispatchPayloadOptional, + MetaOptional: []string{"foo"}, + MetaRequired: []string{"bar"}, + }, + }, + Expected: 
&JobDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "Constructor", + Fields: []*FieldDiff{ + { + Type: DiffTypeEdited, + Name: "Payload", + Old: DispatchPayloadRequired, + New: DispatchPayloadOptional, + }, + }, + Objects: []*ObjectDiff{ + { + Type: DiffTypeNone, + Name: "OptionalMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeNone, + Name: "OptionalMeta", + Old: "foo", + New: "foo", + }, + }, + }, + { + Type: DiffTypeNone, + Name: "RequiredMeta", + Fields: []*FieldDiff{ + { + Type: DiffTypeNone, + Name: "RequiredMeta", + Old: "bar", + New: "bar", + }, + }, + }, + }, + }, + }, + }, + }, } for i, c := range cases { @@ -3424,6 +3665,120 @@ func TestTaskDiff(t *testing.T) { }, }, }, + { + // DispatchInput added + Old: &Task{}, + New: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "foo", + }, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeAdded, + Name: "DispatchInput", + Fields: []*FieldDiff{ + { + Type: DiffTypeAdded, + Name: "File", + Old: "", + New: "foo", + }, + }, + }, + }, + }, + }, + { + // DispatchInput deleted + Old: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "foo", + }, + }, + New: &Task{}, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeDeleted, + Name: "DispatchInput", + Fields: []*FieldDiff{ + { + Type: DiffTypeDeleted, + Name: "File", + Old: "foo", + New: "", + }, + }, + }, + }, + }, + }, + { + // Dispatch input edited + Old: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "foo", + }, + }, + New: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "bar", + }, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "DispatchInput", + Fields: []*FieldDiff{ + { + Type: DiffTypeEdited, + Name: "File", + Old: "foo", + New: "bar", + }, + }, + }, + }, + }, + }, + { + // DispatchInput edited with context. 
Place holder for if more + // fields are added + Contextual: true, + Old: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "foo", + }, + }, + New: &Task{ + DispatchInput: &DispatchInputConfig{ + File: "bar", + }, + }, + Expected: &TaskDiff{ + Type: DiffTypeEdited, + Objects: []*ObjectDiff{ + { + Type: DiffTypeEdited, + Name: "DispatchInput", + Fields: []*FieldDiff{ + { + Type: DiffTypeEdited, + Name: "File", + Old: "foo", + New: "bar", + }, + }, + }, + }, + }, + }, } for i, c := range cases { diff --git a/nomad/structs/funcs.go b/nomad/structs/funcs.go index 104bb58b4772..8d7bbac4fc1a 100644 --- a/nomad/structs/funcs.go +++ b/nomad/structs/funcs.go @@ -269,6 +269,30 @@ func SliceStringIsSubset(larger, smaller []string) (bool, []string) { return subset, offending } +func SliceSetDisjoint(first, second []string) (bool, []string) { + contained := make(map[string]struct{}, len(first)) + for _, k := range first { + contained[k] = struct{}{} + } + + offending := make(map[string]struct{}) + for _, k := range second { + if _, ok := contained[k]; ok { + offending[k] = struct{}{} + } + } + + if len(offending) == 0 { + return true, nil + } + + flattened := make([]string, 0, len(offending)) + for k := range offending { + flattened = append(flattened, k) + } + return false, flattened +} + // VaultPoliciesSet takes the structure returned by VaultPolicies and returns // the set of required policies func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { @@ -304,3 +328,11 @@ func MapStringStringSliceValueSet(m map[string][]string) []string { } return flat } + +func SliceStringToSet(s []string) map[string]struct{} { + m := make(map[string]struct{}, (len(s)+1)/2) + for _, k := range s { + m[k] = struct{}{} + } + return m +} diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 9f94bcacfbfc..0ba3e2b50ba0 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -272,6 +272,14 @@ type JobSummaryRequest struct { QueryOptions } +// JobDispatchRequest is used to dispatch a job based on a constructor job +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string + WriteRequest +} + // NodeListRequest is used to parameterize a list request type NodeListRequest struct { QueryOptions @@ -525,6 +533,14 @@ type JobSummaryResponse struct { QueryMeta } +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + QueryMeta +} + // JobListResponse is used for a list request type JobListResponse struct { Jobs []*JobListStub @@ -1062,39 +1078,6 @@ const ( CoreJobPriority = JobMaxPriority * 2 ) -// JobSummary summarizes the state of the allocations of a job -type JobSummary struct { - JobID string - Summary map[string]TaskGroupSummary - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// Copy returns a new copy of JobSummary -func (js *JobSummary) Copy() *JobSummary { - newJobSummary := new(JobSummary) - *newJobSummary = *js - newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary)) - for k, v := range js.Summary { - newTGSummary[k] = v - } - newJobSummary.Summary = newTGSummary - return newJobSummary -} - -// TaskGroup summarizes the state of all the allocations of a particular -// TaskGroup -type TaskGroupSummary struct { - Queued int - Complete int - Failed int - Running int - Starting int - Lost int -} - // Job is the scope of a scheduling request to Nomad. It is the largest // scoped object, and is a named collection of task groups. 
Each task group // is further composed of tasks. A task group (TG) is the unit of scheduling @@ -1146,6 +1129,12 @@ type Job struct { // Periodic is used to define the interval the job is run at. Periodic *PeriodicConfig + // Constructor is used to specify the job as a constructor job for dispatching. + Constructor *ConstructorConfig + + // Payload is the payload supplied when the job was dispatched. + Payload []byte + // Meta is used to associate arbitrary metadata with this // job. This is opaque to Nomad. Meta map[string]string @@ -1179,6 +1168,10 @@ func (j *Job) Canonicalize() { for _, tg := range j.TaskGroups { tg.Canonicalize(j) } + + if j.Constructor != nil { + j.Constructor.Canonicalize() + } } // Copy returns a deep copy of the Job. It is expected that callers use recover. @@ -1202,6 +1195,7 @@ func (j *Job) Copy() *Job { nj.Periodic = nj.Periodic.Copy() nj.Meta = CopyMapStringString(nj.Meta) + nj.Constructor = nj.Constructor.Copy() return nj } @@ -1276,6 +1270,17 @@ func (j *Job) Validate() error { } } + if j.IsConstructor() { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Constructor job can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.Constructor.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + return mErr.ErrorOrNil() } @@ -1289,6 +1294,42 @@ func (j *Job) LookupTaskGroup(name string) *TaskGroup { return nil } +// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined +// meta data for the task. When joining Job, Group and Task Meta, the precedence +// is by deepest scope (Task > Group > Job). +func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { + group := j.LookupTaskGroup(groupName) + if group == nil { + return nil + } + + task := group.LookupTask(taskName) + if task == nil { + return nil + } + + meta := CopyMapStringString(task.Meta) + if meta == nil { + meta = make(map[string]string, len(group.Meta)+len(j.Meta)) + } + + // Add the group specific meta + for k, v := range group.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + // Add the job specific meta + for k, v := range j.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + return meta +} + // Stub is used to return a summary of the job func (j *Job) Stub(summary *JobSummary) *JobListStub { return &JobListStub{ @@ -1311,6 +1352,11 @@ func (j *Job) IsPeriodic() bool { return j.Periodic != nil } +// IsConstructor returns whether a job is constructor job. +func (j *Job) IsConstructor() bool { + return j.Constructor != nil +} + // VaultPolicies returns the set of Vault policies per task group, per task func (j *Job) VaultPolicies() map[string]map[string]*Vault { policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) @@ -1399,6 +1445,63 @@ type JobListStub struct { JobModifyIndex uint64 } +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + JobID string + + // Summmary contains the summary per task group for the Job + Summary map[string]TaskGroupSummary + + // Children contains a summary for the children of this job. 
+ Children *JobChildrenSummary + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// Copy returns a new copy of JobSummary +func (js *JobSummary) Copy() *JobSummary { + newJobSummary := new(JobSummary) + *newJobSummary = *js + newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary)) + for k, v := range js.Summary { + newTGSummary[k] = v + } + newJobSummary.Summary = newTGSummary + newJobSummary.Children = newJobSummary.Children.Copy() + return newJobSummary +} + +// JobChildrenSummary contains the summary of children job statuses +type JobChildrenSummary struct { + Pending int64 + Running int64 + Dead int64 +} + +// Copy returns a new copy of a JobChildrenSummary +func (jc *JobChildrenSummary) Copy() *JobChildrenSummary { + if jc == nil { + return nil + } + + njc := new(JobChildrenSummary) + *njc = *jc + return njc +} + +// TaskGroup summarizes the state of all the allocations of a particular +// TaskGroup +type TaskGroupSummary struct { + Queued int + Complete int + Failed int + Running int + Starting int + Lost int +} + // UpdateStrategy is used to modify how updates are done type UpdateStrategy struct { // Stagger is the amount of time between the updates @@ -1525,6 +1628,96 @@ type PeriodicLaunch struct { ModifyIndex uint64 } +const ( + DispatchPayloadForbidden = "forbidden" + DispatchPayloadOptional = "optional" + DispatchPayloadRequired = "required" + + // DispatchLaunchSuffic is the string appended to the constructor job's ID + // when dispatching instances of it. + DispatchLaunchSuffic = "/dispatch-" +) + +// ConstructorConfig is used to configure the constructor job +type ConstructorConfig struct { + // Payload configure the payload requirements + Payload string + + // MetaRequired is metadata keys that must be specified by the dispatcher + MetaRequired []string `mapstructure:"required"` + + // MetaOptional is metadata keys that may be specified by the dispatcher + MetaOptional []string `mapstructure:"optional"` +} + +func (d *ConstructorConfig) Validate() error { + var mErr multierror.Error + switch d.Payload { + case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: + default: + multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) + } + + // Check that the meta configurations are disjoint sets + disjoint, offending := SliceSetDisjoint(d.MetaRequired, d.MetaOptional) + if !disjoint { + multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. 
Following keys exist in both: %v", offending)) + } + + return mErr.ErrorOrNil() +} + +func (d *ConstructorConfig) Canonicalize() { + if d.Payload == "" { + d.Payload = DispatchPayloadOptional + } +} + +func (d *ConstructorConfig) Copy() *ConstructorConfig { + if d == nil { + return nil + } + nd := new(ConstructorConfig) + *nd = *d + nd.MetaOptional = CopySliceString(nd.MetaOptional) + nd.MetaRequired = CopySliceString(nd.MetaRequired) + return nd +} + +// DispatchedID returns an ID appropriate for a job dispatched against a +// particular constructor +func DispatchedID(templateID string, t time.Time) string { + u := GenerateUUID()[:8] + return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffic, t.Unix(), u) +} + +// DispatchInputConfig configures how a task gets its input from a job dispatch +type DispatchInputConfig struct { + // File specifies a relative path to where the input data should be written + File string +} + +func (d *DispatchInputConfig) Copy() *DispatchInputConfig { + if d == nil { + return nil + } + nd := new(DispatchInputConfig) + *nd = *d + return nd +} + +func (d *DispatchInputConfig) Validate() error { + // Verify the destination doesn't escape + escaped, err := PathEscapesAllocDir("task/local/", d.File) + if err != nil { + return fmt.Errorf("invalid destination path: %v", err) + } else if escaped { + return fmt.Errorf("destination escapes allocation directory") + } + + return nil +} + var ( defaultServiceJobRestartPolicy = RestartPolicy{ Delay: 15 * time.Second, @@ -2076,6 +2269,9 @@ type Task struct { // Resources is the resources needed by this task Resources *Resources + // DispatchInput configures how the task retrieves its input from a dispatch + DispatchInput *DispatchInputConfig + // Meta is used to associate arbitrary metadata with this // task. This is opaque to Nomad. Meta map[string]string @@ -2113,6 +2309,7 @@ func (t *Task) Copy() *Task { nt.Vault = nt.Vault.Copy() nt.Resources = nt.Resources.Copy() nt.Meta = CopyMapStringString(nt.Meta) + nt.DispatchInput = nt.DispatchInput.Copy() if t.Artifacts != nil { artifacts := make([]*TaskArtifact, 0, len(t.Artifacts)) @@ -2277,6 +2474,13 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error { } } + // Validate the dispatch input block if there + if t.DispatchInput != nil { + if err := t.DispatchInput.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Input validation failed: %v", err)) + } + } + return mErr.ErrorOrNil() } @@ -2418,7 +2622,7 @@ func (t *Template) Validate() error { } // Verify the destination doesn't escape - escaped, err := PathEscapesAllocDir(t.DestPath) + escaped, err := PathEscapesAllocDir("task", t.DestPath) if err != nil { mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) } else if escaped { @@ -2770,14 +2974,16 @@ func (ta *TaskArtifact) GoString() string { } // PathEscapesAllocDir returns if the given path escapes the allocation -// directory -func PathEscapesAllocDir(path string) (bool, error) { +// directory. The prefix allows adding a prefix if the path will be joined, for +// example a "task/local" prefix may be provided if the path will be joined +// against that prefix. 
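Before the function itself, a quick runnable sketch of the escape check may help: it reproduces the join-then-prefix-compare idea against the same fake /alloc-dir/alloc-id root. The comparison tail of the real function is not shown in this hunk, so treat this as an illustration of the idea rather than a verbatim copy; the expected results match TestDispatchInputConfig_Validate below.

	package main

	import (
		"fmt"
		"path/filepath"
		"strings"
	)

	// escapes joins prefix and path under a fake allocation root and reports
	// whether the absolute result climbs out of that root.
	func escapes(prefix, path string) bool {
		root, _ := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/"))
		abs, _ := filepath.Abs(filepath.Join(root, prefix, path))
		return !strings.HasPrefix(abs, root)
	}

	func main() {
		fmt.Println(escapes("task/local/", "foo"))           // false: task/local/foo
		fmt.Println(escapes("task/local/", "../haha"))       // false: still under task/
		fmt.Println(escapes("task/local/", "../../../haha")) // true: climbs out of the alloc dir
	}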
+func PathEscapesAllocDir(prefix, path string) (bool, error) { // Verify the destination doesn't escape the tasks directory - alloc, err := filepath.Abs(filepath.Join("/", "foo/", "bar/")) + alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/")) if err != nil { return false, err } - abs, err := filepath.Abs(filepath.Join(alloc, path)) + abs, err := filepath.Abs(filepath.Join(alloc, prefix, path)) if err != nil { return false, err } @@ -2796,11 +3002,11 @@ func (ta *TaskArtifact) Validate() error { mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified")) } - escaped, err := PathEscapesAllocDir(ta.RelativeDest) + escaped, err := PathEscapesAllocDir("task", ta.RelativeDest) if err != nil { mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) } else if escaped { - mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes task's directory")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) } // Verify the checksum diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index f8c8ee4b4d29..4ba18b65ce92 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -1267,7 +1267,7 @@ func TestTaskArtifact_Validate_Dest(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - valid.RelativeDest = "local/../.." + valid.RelativeDest = "local/../../.." if err := valid.Validate(); err == nil { t.Fatalf("expected error: %v", err) } @@ -1444,3 +1444,64 @@ func TestVault_Validate(t *testing.T) { t.Fatalf("Expected signal empty error") } } + +func TestConstructorConfig_Validate(t *testing.T) { + d := &ConstructorConfig{ + Payload: "foo", + } + + if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { + t.Fatalf("Expected unknown payload requirement: %v", err) + } + + d.Payload = DispatchPayloadOptional + d.MetaOptional = []string{"foo", "bar"} + d.MetaRequired = []string{"bar", "baz"} + + if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { + t.Fatalf("Expected meta not being disjoint error: %v", err) + } +} + +func TestConstructorConfig_Validate_NonBatch(t *testing.T) { + job := testJob() + job.Constructor = &ConstructorConfig{ + Payload: DispatchPayloadOptional, + } + job.Type = JobTypeSystem + + if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { + t.Fatalf("Expected bad scheduler tpye: %v", err) + } +} + +func TestConstructorConfig_Canonicalize(t *testing.T) { + d := &ConstructorConfig{} + d.Canonicalize() + if d.Payload != DispatchPayloadOptional { + t.Fatalf("Canonicalize failed") + } +} + +func TestDispatchInputConfig_Validate(t *testing.T) { + d := &DispatchInputConfig{ + File: "foo", + } + + // task/local/haha + if err := d.Validate(); err != nil { + t.Fatalf("bad: %v", err) + } + + // task/haha + d.File = "../haha" + if err := d.Validate(); err != nil { + t.Fatalf("bad: %v", err) + } + + // ../haha + d.File = "../../../haha" + if err := d.Validate(); err == nil { + t.Fatalf("bad: %v", err) + } +} diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go index 550fbc91b400..c173d3936467 100644 --- a/scheduler/system_sched_test.go +++ b/scheduler/system_sched_test.go @@ -1179,6 +1179,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { h1 := NewHarnessWithState(t, h.State) job1 := mock.SystemJob() job1.ID = job.ID + job1.TaskGroups[0].Tasks[0].Env = make(map[string]string) job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar" noErr(t, 
h1.State.UpsertJob(h1.NextIndex(), job1))
diff --git a/scheduler/util.go b/scheduler/util.go
index d3219360119d..4b66be2e048c 100644
--- a/scheduler/util.go
+++ b/scheduler/util.go
@@ -328,8 +328,12 @@ func shuffleNodes(nodes []*structs.Node) {
 }
 
 // tasksUpdated does a diff between task groups to see if the
-// tasks, their drivers, environment variables or config have updated.
-func tasksUpdated(a, b *structs.TaskGroup) bool {
+// tasks, their drivers, environment variables or config have updated. The
+// inputs are the two jobs to diff and the name of the task group to compare.
+func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) bool {
+	a := jobA.LookupTaskGroup(taskGroup)
+	b := jobB.LookupTaskGroup(taskGroup)
+
 	// If the number of tasks do not match, clearly there is an update
 	if len(a.Tasks) != len(b.Tasks) {
 		return true
@@ -358,9 +362,6 @@ func tasksUpdated(a, b *structs.TaskGroup) bool {
 		if !reflect.DeepEqual(at.Env, bt.Env) {
 			return true
 		}
-		if !reflect.DeepEqual(at.Meta, bt.Meta) {
-			return true
-		}
 		if !reflect.DeepEqual(at.Artifacts, bt.Artifacts) {
 			return true
 		}
@@ -371,6 +372,13 @@ func tasksUpdated(a, b *structs.TaskGroup) bool {
 			return true
 		}
 
+		// Check the metadata
+		if !reflect.DeepEqual(
+			jobA.CombinedTaskMeta(taskGroup, at.Name),
+			jobB.CombinedTaskMeta(taskGroup, bt.Name)) {
+			return true
+		}
+
 		// Inspect the network to see if the dynamic ports are different
 		if len(at.Resources.Networks) != len(bt.Resources.Networks) {
 			return true
@@ -452,8 +460,8 @@ func inplaceUpdate(ctx Context, eval *structs.Evaluation, job *structs.Job,
 
 		// Check if the task drivers or config has changed, requires
 		// a rolling upgrade since that cannot be done in-place.
-		existing := update.Alloc.Job.LookupTaskGroup(update.TaskGroup.Name)
-		if tasksUpdated(update.TaskGroup, existing) {
+		existing := update.Alloc.Job
+		if tasksUpdated(job, existing, update.TaskGroup.Name) {
 			continue
 		}
diff --git a/scheduler/util_test.go b/scheduler/util_test.go
index 7a6c7a63f0ce..73627c1c88e5 100644
--- a/scheduler/util_test.go
+++ b/scheduler/util_test.go
@@ -454,50 +454,51 @@ func TestShuffleNodes(t *testing.T) {
 func TestTasksUpdated(t *testing.T) {
 	j1 := mock.Job()
 	j2 := mock.Job()
+	name := j1.TaskGroups[0].Name
 
-	if tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
+	if tasksUpdated(j1, j2, name) {
 		t.Fatalf("bad")
 	}
 
 	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
-	if !tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
+	if !tasksUpdated(j1, j2, name) {
 		t.Fatalf("bad")
 	}
 
 	j3 := mock.Job()
 	j3.TaskGroups[0].Tasks[0].Name = "foo"
-	if !tasksUpdated(j1.TaskGroups[0], j3.TaskGroups[0]) {
+	if !tasksUpdated(j1, j3, name) {
 		t.Fatalf("bad")
 	}
 
 	j4 := mock.Job()
 	j4.TaskGroups[0].Tasks[0].Driver = "foo"
-	if !tasksUpdated(j1.TaskGroups[0], j4.TaskGroups[0]) {
+	if !tasksUpdated(j1, j4, name) {
 		t.Fatalf("bad")
 	}
 
 	j5 := mock.Job()
 	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks, j5.TaskGroups[0].Tasks[0])
-	if !tasksUpdated(j1.TaskGroups[0], j5.TaskGroups[0]) {
+	if !tasksUpdated(j1, j5, name) {
 		t.Fatalf("bad")
 	}
 
 	j6 := mock.Job()
 	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
-	if !tasksUpdated(j1.TaskGroups[0], j6.TaskGroups[0]) {
+	if !tasksUpdated(j1, j6, name) {
 		t.Fatalf("bad")
 	}
 
 	j7 := mock.Job()
 	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
-	if !tasksUpdated(j1.TaskGroups[0],
j7.TaskGroups[0]) { + if !tasksUpdated(j1, j7, name) { t.Fatalf("bad") } j8 := mock.Job() j8.TaskGroups[0].Tasks[0].User = "foo" - if !tasksUpdated(j1.TaskGroups[0], j8.TaskGroups[0]) { + if !tasksUpdated(j1, j8, name) { t.Fatalf("bad") } @@ -507,49 +508,63 @@ func TestTasksUpdated(t *testing.T) { GetterSource: "http://foo.com/bar", }, } - if !tasksUpdated(j1.TaskGroups[0], j9.TaskGroups[0]) { + if !tasksUpdated(j1, j9, name) { t.Fatalf("bad") } j10 := mock.Job() j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom" - if !tasksUpdated(j1.TaskGroups[0], j10.TaskGroups[0]) { + if !tasksUpdated(j1, j10, name) { t.Fatalf("bad") } j11 := mock.Job() j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337 - if !tasksUpdated(j1.TaskGroups[0], j11.TaskGroups[0]) { + if !tasksUpdated(j1, j11, name) { t.Fatalf("bad") } j12 := mock.Job() j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100 - if !tasksUpdated(j1.TaskGroups[0], j12.TaskGroups[0]) { + if !tasksUpdated(j1, j12, name) { t.Fatalf("bad") } j13 := mock.Job() j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar" - if !tasksUpdated(j1.TaskGroups[0], j13.TaskGroups[0]) { + if !tasksUpdated(j1, j13, name) { t.Fatalf("bad") } j14 := mock.Job() j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}} - if !tasksUpdated(j1.TaskGroups[0], j14.TaskGroups[0]) { + if !tasksUpdated(j1, j14, name) { t.Fatalf("bad") } j15 := mock.Job() j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}} - if !tasksUpdated(j1.TaskGroups[0], j15.TaskGroups[0]) { + if !tasksUpdated(j1, j15, name) { t.Fatalf("bad") } j16 := mock.Job() j16.TaskGroups[0].EphemeralDisk.Sticky = true - if !tasksUpdated(j1.TaskGroups[0], j16.TaskGroups[0]) { + if !tasksUpdated(j1, j16, name) { + t.Fatal("bad") + } + + // Change group meta + j17 := mock.Job() + j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll" + if !tasksUpdated(j1, j17, name) { + t.Fatal("bad") + } + + // Change job meta + j18 := mock.Job() + j18.Meta["j18_test"] = "roll_baby_roll" + if !tasksUpdated(j1, j18, name) { t.Fatal("bad") } } diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 000000000000..bcfa19520af9 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 000000000000..931ae31606f8 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 000000000000..6050c10f4c8b --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 000000000000..cea12879a0ea --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 000000000000..72efb0353ddf --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
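For orientation, the vendored package exposes the standard golang/snappy block API (Encode/Decode) alongside the framed Reader defined below. A typical block-level round-trip, assuming the vendored import path, looks like this:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/golang/snappy"
	)

	func main() {
		src := bytes.Repeat([]byte("hello "), 10) // repetitive input compresses well
		compressed := snappy.Encode(nil, src)     // nil dst lets snappy allocate
		decoded, err := snappy.Decode(nil, compressed)
		if err != nil {
			fmt.Println("decode failed:", err)
			return
		}
		fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
			len(src), len(compressed), bytes.Equal(src, decoded))
	}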
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
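+			// The chunk body is a 4-byte little-endian checksum of the
+			// decoded data (the masked CRC-32C computed by crc), followed
+			// by a Snappy-compressed block.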
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 000000000000..fcd192b849ed --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 000000000000..e6179f65e351 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
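+	//
+	// In Go terms, the next two instructions are:
+	//
+	//	copy(dst[d:d+16], src[s:s+16])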
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//	goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//	goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
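+	// For example, with offset == 1 (a run of a single repeated byte), each
+	// iteration of the loop below doubles the pattern: offset goes 1, 2, 4,
+	// then 8, at which point the 8-byte copies no longer self-overlap.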
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 000000000000..8c9f2049bc7b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 000000000000..874968906090 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
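+//
+// For example (dst being any io.Writer):
+//
+//	w := snappy.NewBufferedWriter(dst)
+//	if _, err := w.Write(uncompressed); err != nil {
+//		// handle err
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle err
+//	}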
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
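+		// That 8-byte header is: 1 byte of chunk type, 3 bytes of
+		// little-endian chunk length (which counts the 4 checksum bytes),
+		// and 4 bytes of the little-endian checksum of the uncompressed
+		// data.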
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 000000000000..2a56fb504c71 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) \ No newline at end of file diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 000000000000..adfd979fe277 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
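+//
+// For example, extendMatch([]byte("aabbaabbxx"), 0, 4) returns 8: the four
+// bytes src[0:4] and src[4:8] match, and the match cannot be extended past
+// src[8] == 'x'.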
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 000000000000..dbcae905e6e0 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
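+// A literal chunk is the tag byte, plus one or two extra length bytes for
+// longer literals, followed by the literal bytes themselves.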
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. 
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 000000000000..0cf5e379c47f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
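The masked checksum above follows the framing-format rule: rotate the Castagnoli CRC-32 right by 15 bits, then add 0xa282ead8. A standalone sketch mirroring the unexported crc function, using only the standard library (the maskedCRC name and sample input are ours):

package main

import (
	"fmt"
	"hash/crc32"
)

// maskedCRC mirrors snappy.go's crc: a Castagnoli CRC-32, rotated right by
// 15 bits and offset by 0xa282ead8, per section 3 of the framing format.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
	return c>>15 | c<<17 + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}

(Note the rotation must happen before the addition, as in the vendored code: compute c>>15|c<<17 first, then add the constant.)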
diff --git a/vendor/vendor.json b/vendor/vendor.json
index e5a66f62c482..7667e51ec4e3 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -255,12 +255,6 @@
 		"revision": "da39e9a4f920a15683dd0f23923c302d4db6eed5",
 		"revisionTime": "2016-05-28T08:11:04Z"
 	},
-	{
-		"checksumSHA1": "iP5slJJPRZUm0rfdII8OiATAACA=",
-		"path": "github.com/docker/docker/pkg/idtools",
-		"revision": "52debcd58ac91bf68503ce60561536911b74ff05",
-		"revisionTime": "2016-05-20T15:17:10Z"
-	},
 	{
 		"checksumSHA1": "iP5slJJPRZUm0rfdII8OiATAACA=",
 		"path": "github.com/docker/docker/pkg/idtools",
@@ -268,8 +262,8 @@
 		"revisionTime": "2016-05-28T10:48:36Z"
 	},
 	{
-		"checksumSHA1": "tdhmIGUaoOMEDymMC23qTS7bt0g=",
-		"path": "github.com/docker/docker/pkg/ioutils",
+		"checksumSHA1": "iP5slJJPRZUm0rfdII8OiATAACA=",
+		"path": "github.com/docker/docker/pkg/idtools",
 		"revision": "52debcd58ac91bf68503ce60561536911b74ff05",
 		"revisionTime": "2016-05-20T15:17:10Z"
 	},
@@ -279,6 +273,12 @@
 		"revision": "da39e9a4f920a15683dd0f23923c302d4db6eed5",
 		"revisionTime": "2016-05-28T08:11:04Z"
 	},
+	{
+		"checksumSHA1": "tdhmIGUaoOMEDymMC23qTS7bt0g=",
+		"path": "github.com/docker/docker/pkg/ioutils",
+		"revision": "52debcd58ac91bf68503ce60561536911b74ff05",
+		"revisionTime": "2016-05-20T15:17:10Z"
+	},
 	{
 		"checksumSHA1": "ndnAFCfsGC3upNQ6jAEwzxcurww=",
 		"path": "github.com/docker/docker/pkg/longpath",
@@ -484,6 +484,12 @@
 		"path": "github.com/golang/protobuf/proto/testdata",
 		"revision": "0dfe8f37844c14cb32c7247925270e0f7ba90973"
 	},
+	{
+		"checksumSHA1": "W+E/2xXcE1GmJ0Qb784ald0Fn6I=",
+		"path": "github.com/golang/snappy",
+		"revision": "d9eb7a3d35ec988b8585d4a0068e462c27d28380",
+		"revisionTime": "2016-05-29T05:00:41Z"
+	},
 	{
 		"comment": "1.0.0",
 		"path": "github.com/gorhill/cronexpr",
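As a worked example of the block format described in snappy.go's package comment, a small standalone sketch that classifies a chunk's first byte; the three sample bytes are the tag bytes from the emitLiteral/emitCopy walkthrough earlier (a 5-byte literal, a length 7 tagCopy1, a length 60 tagCopy2):

package main

import "fmt"

func main() {
	// The low 2 bits of a chunk's first byte are the tag l; the high 6 bits
	// are m.
	for _, b := range []byte{0x10, 0x0d, 0xee} {
		l, m := b&0x03, b>>2
		switch l {
		case 0x00: // tagLiteral; m < 60 means 1+m literal bytes follow
			fmt.Printf("%#02x: literal of %d bytes\n", b, 1+int(m))
		case 0x01: // tagCopy1: length 4 + low 3 bits of m, 1 offset byte follows
			fmt.Printf("%#02x: copy1, length %d, offset bits 8-10 = %d\n", b, 4+int(m&0x07), int(m>>3))
		case 0x02: // tagCopy2: length 1 + m, 2 offset bytes follow
			fmt.Printf("%#02x: copy2, length %d\n", b, 1+int(m))
		default: // tagCopy4 (legacy): length 1 + m, 4 offset bytes follow
			fmt.Printf("%#02x: copy4, length %d\n", b, 1+int(m))
		}
	}
}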